| Column | String length (min) | String length (max) |
|---|---|---|
| hip_filename | 5 | 84 |
| hip_content | 79 | 9.69M |
| cuda_filename | 4 | 83 |
| cuda_content | 19 | 9.69M |
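Each row pairs a hipify-generated `.hip` source file with the original `.cu` source it was produced from. As a minimal sketch of how a row with this schema could be inspected (assuming the rows are loadable through the `datasets` library; the dataset id below is a placeholder, not the real one):

```python
# Hypothetical usage sketch: "user/hip-cuda-pairs" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("user/hip-cuda-pairs", split="train")

row = ds[0]
print(row["hip_filename"], row["cuda_filename"])          # paired file names
print(len(row["hip_content"]), len(row["cuda_content"]))  # content lengths, per the stats above
```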
b44bb1e0c38b6b47d2c88ed2c3848fb78c72761d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #include "ew_op_gpu.h" #include <stdio.h> typedef struct __align__(16) LutEntry { int offsetX; int offsetW; float gate; float unused; } LutEntry; template <bool Fprop, typename TW, typename TX, typename TY> __global__ void __launch_bounds__(32) gemm_blocksparse_gated_08x64x08x8_xprop( const int2* __restrict__ Lut, const float* __restrict__ Gate, const TW* __restrict__ W, const TX* __restrict__ X, TY* Y, int* Lock, int locks, int N /* N is in units of groups of 8 elements each (N/8) */) { if (Fprop) asm(".shared .align 16 .b32 share[576];" ::); // 576 = 8*8 + 64*8 else asm(".shared .align 16 .b32 share[608];" ::); // 608 = 12*8 + 64*8 extern __shared__ LutEntry Lut4_s[]; LutEntry* Lut4s = &Lut4_s[Fprop ? 576/4 : 608/4]; int tid = threadIdx.x; int idx_N = blockIdx.x; int idx_L = blockIdx.y; int4 lut_head = ((const int4*)Lut)[idx_L]; int tid7 = tid & 7; int tid8 = tid >> 3; int tid16 = tid & 16; int readXs = ((tid >> 1) & 7) << 4; int readWs = (tid & 1) << 4; // second half of warp starts 4 rows down readXs += tid16 << 6; // 64*4*4 readWs += tid16 << 3; // 8*4*4 int storXs = (tid8*64 + tid7*4) << 2; int storWs; if (Fprop) storWs = tid << 3; else { // Transpose weights on store to shared // Avoid bank conflicts by shifting writes over by 8 every 2 rows (+tid3*8) int tid3 = tid & 3; int tid4 = tid >> 2; storWs = (tid3*8*2 + tid4 + tid3*8) << 2; readWs += tid16 << 2; // shift over 8 floats every 2 rows, second half of warp starts 4 rows down } int n = idx_N*8 + tid7; bool bn = n < N; int offsetX = (tid8*N + n)*8*2; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int idx_Lock = lut_head.w; int N8 = N*8*8*2; // 8 lines, 8 elements per index, two bytes per element uint dep_thd_mask = 0xffffffff; dep_thd_mask >>= 32 - tid; int new_lut_size = 0; // prefetch the lut data into shared Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) { LutEntry entry; *(int2*)&entry = Lut[i]; entry.gate = Gate[entry.offsetW]; // only add the entry to the lut if the gate is non-zero // compiler is stupid about reusing predicate here so use asm uint ballot, warp_non_zero; asm volatile ("{\n\t" ".reg .pred p; \n\t" ".reg .u32 ballot; \n\t" "setp.ne.ftz.f32 p, %2, 0f00000000; \n\t" # if TORCH_HIP_VERSION >= 9020 "vote.sync.ballot.b32 ballot, p, 0xffffffff;\n\t" # else "vote.ballot.b32 ballot, p; \n\t" # endif "mov.b32 %0, ballot; \n\t" "popc.b32 %1, ballot; \n\t" "@!p bra GATE_ZERO; \n\t" "}" : "=r"(ballot), "=r"(warp_non_zero) : "f"(entry.gate)); { uint dep_thd_cnt = __popc(dep_thd_mask & ballot); entry.unused = 0; entry.offsetX *= N8; // 8 lines of N per block entry.offsetW *= 64*2; // 64 entries of W per block, 2 bytes each Lut4s[new_lut_size + dep_thd_cnt] = entry; } asm volatile ("\nGATE_ZERO:\n" ::); new_lut_size += warp_non_zero; } //lut_size = new_lut_size; # if TORCH_HIP_VERSION >= 9020 asm volatile ("shfl.sync.idx.b32 %0, %1, 0, 0x1f, 0xffffffff;" : "=r"(lut_size) : "r"(new_lut_size)); # else asm volatile ("shfl.idx.b32 %0, %1, 0, 0x1f;" : "=r"(lut_size) : "r"(new_lut_size)); # endif // zero accumulation registers float regY[4][8]; for (int w = 0; w < 4; w++) for (int x = 0; x < 8; x++) // use asm here to ensure this happens after lut table construction and before main loop skipping asm volatile ("mov.b32 %0, 0;" : "=f"(regY[w][x]) :); // skip loop if empty lut // Compiler generates suboptimal code if a simple "for loop" is used. 
asm volatile (".reg .pred lut_zero; \n\t" "setp.eq.u32 lut_zero, %0, 0; \n\t" "@lut_zero bra.uni END_LOOP;" :: "r"(lut_size)); int i = 0; do { LutEntry entry = Lut4s[i++]; entry.offsetX += offsetX; entry.offsetW += tid*4; const TW* W0; const TX* X0; const TX* X4; // Simplify pointer arithmatic by letting compiler assume all offsets fit in 32 bits. asm("{\n\t" ".reg .u64 x0, x4, w0;\n\t" "mov.b64 w0, {%5, 0};\n\t" "mov.b64 x0, {%6, 0};\n\t" "mov.b64 x4, {%7, 0};\n\t" "add.u64 %0, w0, %3;\n\t" "add.u64 %1, x0, %4;\n\t" "add.u64 %2, x4, %4;\n\t" "}" : "=l"(W0),"=l"(X0),"=l"(X4) : "l"(W), "l"(X), "r"(entry.offsetW), "r"(entry.offsetX), "r"(entry.offsetX + N*4*8*2) ); // Fetch 8 rows at a time from W and X float2 w0 = load(W0, 0); float8 x0 = load(X0, 0, bn); float8 x4 = load(X4, 0, bn); w0 = ew_mul(w0, entry.gate); // store to shared. if (Fprop) st_shared_v2(storWs + 64*8*4, w0); else { // transpose the shared store of W st_shared_v1(storWs + 0*4 + 64*8*4, w0.x); st_shared_v1(storWs + 8*4 + 64*8*4, w0.y); } st_shared_v4(storXs + (0*64 + 0*32)*4, x0.a); st_shared_v4(storXs + (0*64 + 1*32)*4, x0.b); st_shared_v4(storXs + (4*64 + 0*32)*4, x4.a); st_shared_v4(storXs + (4*64 + 1*32)*4, x4.b); // computes an 8x64x8 gemm tile with 4x8 register blocking float regW[4]; float regX[8]; #pragma unroll for (int j = 0; j < 4; j++) { // fetch outer product data ld_shared_v4(readWs + ( 8*j + 64*8 + (Fprop ? 0 : (j>>1)*8))*4, regW ); // shift over 8 floats every 2 rows ld_shared_v4(readXs + (64*j + 0)*4, &regX[0] ); ld_shared_v4(readXs + (64*j + 32)*4, &regX[4] ); // accumulate outer product for (int w = 0; w < 4; w++) for (int x = 0; x < 8; x++) regY[w][x] += regW[w] * regX[x]; } } while (i < lut_size); asm volatile ("\nEND_LOOP:\n":: ); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_N) :); int tidN = (tid >> 1) & 7; int tidK = tid & 1; int tidk = tid >> 4; bool t16 = tid16 != 0; float outY[2][8]; for (int w = 0; w < 2; w++) { for (int x = 0; x < 8; x++) { float swap = t16 ? regY[2*w + 0][x] : regY[2*w + 1][x]; outY[w][x] = t16 ? 
regY[2*w + 1][x] : regY[2*w + 0][x]; outY[w][x] += shfl_xor(swap, 16); } } n = idx_N*64/8 + tidN; bn = n < N; Y += (idx_K*8 + tidK*4 + tidk)*N + n; if (idx_Lock == 0) { // no lock needed just write out the results store(Y, *(float8*)outY[0], N*0, bn); store(Y, *(float8*)outY[1], N*2, bn); } else { int offsetL = idx_N*locks + idx_Lock - 1; Lock += offsetL; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncwarp(); int offsetC = locks*gridDim.x; int* Count = Lock + offsetC; int count = *Count; __syncwarp(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory store(Y, *(float8*)outY[0], N*0, bn); store(Y, *(float8*)outY[1], N*2, bn); __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { // subsequent blocks must accumulate float8 y0 = load_c(Y, N*0, bn); float8 y2 = load_c(Y, N*2, bn); y0 = ew_add(y0, *(float8*)outY[0]); y2 = ew_add(y2, *(float8*)outY[1]); store(Y, y0, N*0, bn); store(Y, y2, N*2, bn); __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } template <bool Fprop, typename TW, typename TX, typename TY> __global__ void __launch_bounds__(32) gemm_blocksparse_gated_08x64x08x4_xprop( const int2* __restrict__ Lut, const float* __restrict__ Gate, const TW* __restrict__ W, const TX* __restrict__ X, TY* Y, int* Lock, int locks, int N) { __shared__ float shrX1[64*8]; __shared__ float shrW1[Fprop ? 8*8 : 12*8]; float2* shrW2 = (float2*)shrW1; float2* shrX2 = (float2*)shrX1; float4* shrX4 = (float4*)shrX1; extern __shared__ LutEntry Lut4s[]; int tid = threadIdx.x; int idx_N = blockIdx.x; int idx_L = blockIdx.y; int4 lut_head = ((const int4*)Lut)[idx_L]; int tid15 = tid & 15; int tid16 = tid >> 4; float2* storW2; float* storW1; if (Fprop) storW2 = &shrW2[tid]; else { int tid3 = tid & 3; int tid4 = tid >> 2; // Transpose weights on store to shared // Avoid bank conflicts by shifting writes over by 8 every 2 rows (+tid3*8) storW1 = &shrW1[tid3*16 + tid4 + tid3*8]; } float4* storX4 = &shrX4[tid]; // float2* readX2 = &shrX2[tid15]; // float2* readW2 = &shrW2[tid16]; float2* readX2 = &shrX2[tid >> 1]; float2* readW2 = &shrW2[tid & 1]; int N4 = N >> 2; int N2 = N4 << 1; int N8 = N4 << 3; int n4 = idx_N*16 + tid15; bool bn = n4 < N4; const TX* X0 = X + tid16*N4 + n4; const TX* X2 = X0 + N2; const TX* X4 = X2 + N2; const TX* X6 = X4 + N2; const TW* W0 = W + tid; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int idx_Lock = lut_head.w; //printf("%d %2d %d\n", idx_K, tid, lut_size); uint dep_thd_mask = 0xffffffff; dep_thd_mask >>= 32 - tid; int new_lut_size = 0; // prefetch the lut data into shared Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) { //printf("%d %2d %d %d %d\n", idx_K, tid, i, lut_size, new_lut_size); LutEntry entry; *(int2*)&entry = Lut[i]; entry.gate = Gate[entry.offsetW]; // only add the entry to the lut if the gate is non-zero bool gate_non_zero = entry.gate != 0.0f; //uint gate_ballot = __ballot_sync(0xffffffff, gate_non_zero); uint gate_ballot = __ballot(gate_non_zero); uint warp_non_zero = __popc(gate_ballot); if (gate_non_zero) { uint dep_thd_cnt = __popc(dep_thd_mask & gate_ballot); entry.unused = 0; entry.offsetX *= N8; entry.offsetW *= 32; Lut4s[new_lut_size + dep_thd_cnt] = entry; } new_lut_size += warp_non_zero; } // lut_size = new_lut_size; // # if TORCH_HIP_VERSION >= 9020 // lut_size = __shfl_sync(0xffffffff, new_lut_size, 0, 32); 
// # else lut_size = __shfl(new_lut_size, 0, 32); // # endif //printf("%d %2d %d\n", idx_K, tid, lut_size); // zero accumulation registers float regY[4][4]; for (int w = 0; w < 4; w++) for (int x = 0; x < 4; x++) asm volatile ("mov.b32 %0, 0;" : "=f"(regY[w][x]) :); // skip loop if empty lut // Compiler generates suboptimal code if a simple "for loop" is used. asm volatile (".reg .pred lut_zero; \n\t" "setp.eq.u32 lut_zero, %0, 0; \n\t" "@lut_zero bra.uni END_LOOP;" :: "r"(lut_size)); // loop over each lut entry to compute a gemm block int i = 0; #pragma unroll 1 do { LutEntry entry = Lut4s[i++]; // Fetch 8 rows at a time from W and X TW w0; TX x0, x2, x4, x6; w0 = W0[entry.offsetW]; if (bn) { x0 = X0[entry.offsetX]; x2 = X2[entry.offsetX]; x4 = X4[entry.offsetX]; x6 = X6[entry.offsetX]; } // Convert to float if needed and store to shared. if (Fprop) storW2[0] = ew_mul(to_float(w0), entry.gate); else { // transpose the shared store of W float2 w2 = ew_mul(to_float(w0), entry.gate); storW1[0] = w2.x; storW1[8] = w2.y; } storX4[0*16] = to_float(x0); storX4[2*16] = to_float(x2); storX4[4*16] = to_float(x4); storX4[6*16] = to_float(x6); float regX[4]; float regW[4]; // computes an 8x64x8 gemm block #pragma unroll for (int j = 0; j < 8; j++) { // fetch outer product data *(float2*)&regX[0] = readX2[32*j + 0]; *(float2*)&regW[0] = readW2[ 4*j + 0 + (Fprop ? 0 : (j>>1)*4)]; // shift over 8 floats every 2 rows *(float2*)&regX[2] = readX2[32*j + 16]; *(float2*)&regW[2] = readW2[ 4*j + 2 + (Fprop ? 0 : (j>>1)*4)]; // accumulate outer product for (int w = 0; w < 4; w++) for (int x = 0; x < 4; x++) regY[w][x] += regW[w] * regX[x]; } } while (i < lut_size); asm volatile ("\nEND_LOOP:\n":: ); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_N) :); // tid = threadIdx.x; // idx_N = blockIdx.x; tid15 = tid >> 1; tid16 = tid & 1; N2 = N >> 1; int n = idx_N*32 + tid15; int yi[4]; yi[0] = (idx_K*8 + tid16*2)*N2 + n; yi[1] = yi[0] + N2; yi[2] = yi[0] + N2*4; yi[3] = yi[2] + N2; //printf("K:%2d N:%d tid:%2d t15:%2d t16:%2d N2:%2d n:%2d yi:%d\n", idx_K, idx_N, tid, tid15, tid16, N2, n, yi[0]); bool bn0 = n+0 < N2; bool bn16 = n+16 < N2; if (idx_Lock == 0) { // no lock needed just write out the results for (int i = 0; i < 4; i++) { store(Y, *(float2*)&regY[i][0], yi[i]+0, bn0 ); store(Y, *(float2*)&regY[i][2], yi[i]+16, bn16); } } else { int offsetL = idx_N*locks + idx_Lock - 1; Lock += offsetL; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncwarp(); int offsetC = locks*gridDim.x; int* Count = Lock + offsetC; int count = *Count; __syncwarp(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (int i = 0; i < 4; i++) { store(Y, *(float2*)&regY[i][0], yi[i]+0, bn0 ); store(Y, *(float2*)&regY[i][2], yi[i]+16, bn16); } __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { // subsequent blocks must accumulate float2 y[8]; for (int i = 0; i < 4; i++) { y[i + 0] = load_c(Y, yi[i]+0, bn0 ); y[i + 4] = load_c(Y, yi[i]+16, bn16); y[i + 0].x += regY[i][0]; y[i + 0].y += regY[i][1]; y[i + 4].x += regY[i][2]; y[i + 4].y += regY[i][3]; } for (int i = 0; i < 4; i++) { store(Y, y[i + 0], yi[i]+0, bn0 ); store(Y, y[i + 4], yi[i]+16, bn16); } __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } template <typename TX, typename TE, typename TU> __global__ void __launch_bounds__(32) 
gemm_blocksparse_gated_08x64x08x8_updat( struct Plist<TX,8> X, struct Plist<TE,8> E, const int2* __restrict__ Lut, const float* __restrict__ Gate, TU* U, int params8, int N, int loops, float alpha, float beta) { // add padding for bank-conflict-free stores //__shared__ float2 shrU2[64*8 + 4*8]; // add padding for bank-conflict-free stores asm(".shared .align 16 .b32 share[1088];" ::); // 1088 = (64*8 + 4*8)*2 extern __shared__ float2 shrU2[]; int tid = threadIdx.x; int bid = blockIdx.x; float gate = Gate[bid]; int2 lut_head = Lut[bid]; int tid7 = tid & 7; int tid8 = tid >> 3; int tid1 = tid & 1; int tid2 = (tid >> 1) & 1; int tid4 = tid >> 2; // avoid bank conflicts when writing transpose (+tid7*4) int storeS = tid7*8*8 + tid8 + tid7*4; // 4 threads read blocks of 8x8 each shifted over by 4 int iread = tid4*8*8 + tid4*4; int readXs = iread + tid2*2; int readEs = iread + tid1*2 + 64*8 + 4*8; int idx_X = lut_head.x; int idx_E = lut_head.y; int offsetX = (idx_X*8 + tid8)*N + tid7*8; int offsetE = (idx_E*8 + tid8)*N + tid7*8; int N4 = N; // exit kernel if gate is zero asm ("{\n\t" ".reg .pred p; \n\t" "setp.eq.ftz.f32 p, %0, 0f00000000; \n\t" "@p exit; \n\t" "}" :: "f"(gate)); // This keeps all prior logic outside of the loops. asm("shl.b32 %0, %0, 3;" : "+r"(N4) : ); asm("shl.b32 %0, %0, 2;" : "+r"(storeS) : ); asm("shl.b32 %0, %0, 2;" : "+r"(readXs) : ); asm("shl.b32 %0, %0, 2;" : "+r"(readEs) : ); asm("shl.b32 %0, %0, 1;" : "+r"(offsetX) : ); asm("shl.b32 %0, %0, 1;" : "+r"(offsetE) : ); float regU[4][4]; //[x][e] for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] = 0; int p = 0; #pragma unroll 1 do { const TX* X0; const TE* E0; asm("{\n\t" ".reg .u64 X, E, offsetX, offsetE;\n\t" # if __CUDA_ARCH__ >= 700 "ld.param.u64 X, [%2 + 0x160];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x1a0];\n\t" // WARNING: hard coded param offset. # else "ld.param.u64 X, [%2 + 0x140];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x180];\n\t" // WARNING: hard coded param offset. # endif "cvta.to.global.u64 X, X;\n\t" "cvta.to.global.u64 E, E;\n\t" "mov.b64 offsetX, {%3, 0};\n\t" "mov.b64 offsetE, {%4, 0};\n\t" "add.u64 %0, X, offsetX;\n\t" "add.u64 %1, E, offsetE;\n\t" "}" : "=l"(X0), "=l"(E0) : "r"(p), "r"(offsetX), "r"(offsetE)); p += 8; int n = (tid & 7) << 3; int loop = 0; #pragma unroll 1 do { const TX* X4; const TE* E4; asm("{\n\t" ".reg .u64 N4;\n\t" "mov.b64 N4, {%4, 0};\n\t" "add.u64 %0, N4, %2;\n\t" "add.u64 %1, N4, %3;\n\t" "}" : "=l"(X4),"=l"(E4) : "l"(X0), "l"(E0), "r"(N4) ); TX x0, x4; TE e0, e4; ew_zero(x0); ew_zero(x4); ew_zero(e0); ew_zero(e4); if (n < N) { x0 = __ldg(X0); x4 = __ldg(X4); e0 = __ldg(E0); e4 = __ldg(E4); } // Convert to float if needed and store to shared as transpose. 
float8 fx0 = to_float(x0); float8 fx4 = to_float(x4); // advance pointer by 64*2 asm ("add.u64 %0, %0, 128;" : "+l"(X0)); n += 64; st_shared_v1(storeS + (0*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.x); st_shared_v1(storeS + (1*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.y); st_shared_v1(storeS + (2*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.z); st_shared_v1(storeS + (3*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.w); st_shared_v1(storeS + (4*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.x); st_shared_v1(storeS + (5*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.y); st_shared_v1(storeS + (6*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.z); st_shared_v1(storeS + (7*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.w); st_shared_v1(storeS + (0*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.x); st_shared_v1(storeS + (1*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.y); st_shared_v1(storeS + (2*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.z); st_shared_v1(storeS + (3*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.w); st_shared_v1(storeS + (4*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.x); st_shared_v1(storeS + (5*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.y); st_shared_v1(storeS + (6*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.z); st_shared_v1(storeS + (7*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.w); float8 fe0 = to_float(e0); float8 fe4 = to_float(e4); // advance pointer by 64*2 asm ("add.u64 %0, %0, 128;" : "+l"(E0)); st_shared_v1(storeS + (0*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.x); st_shared_v1(storeS + (1*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.y); st_shared_v1(storeS + (2*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.z); st_shared_v1(storeS + (3*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.w); st_shared_v1(storeS + (4*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.x); st_shared_v1(storeS + (5*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.y); st_shared_v1(storeS + (6*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.z); st_shared_v1(storeS + (7*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.w); st_shared_v1(storeS + (0*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.x); st_shared_v1(storeS + (1*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.y); st_shared_v1(storeS + (2*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.z); st_shared_v1(storeS + (3*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.w); st_shared_v1(storeS + (4*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.x); st_shared_v1(storeS + (5*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.y); st_shared_v1(storeS + (6*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.z); st_shared_v1(storeS + (7*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.w); float regX[4]; float regE[4]; #pragma unroll for (int j = 0; j < 8; j++) { // fetch outer product data ld_shared_v2(readXs + (8*j + 0)*4, &regX[0] ); ld_shared_v2(readEs + (8*j + 0)*4, &regE[0] ); ld_shared_v2(readXs + (8*j + 4)*4, &regX[2] ); ld_shared_v2(readEs + (8*j + 4)*4, &regE[2] ); for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] += regX[x] * regE[e]; } loop++; } while (loop < loops); } while (p < params8); tid = threadIdx.x; bid = blockIdx.x; int offset = bid*32 + tid; TU t2; ew_zero(t2); if (beta != 0.0f) t2 = U[offset]; tid1 = tid & 1; tid2 = (tid >> 1) & 1; tid4 = (tid & -4) << 3; float2* storU2 = &shrU2[tid4 + tid2*4*2 + tid1]; storU2[0*4 + 0] = *(float2*)&regU[0][0]; storU2[0*4 + 2] = *(float2*)&regU[0][2]; storU2[1*4 + 0] = *(float2*)&regU[1][0]; storU2[1*4 + 2] = *(float2*)&regU[1][2]; storU2[4*4 + 0] = *(float2*)&regU[2][0]; storU2[4*4 + 2] = *(float2*)&regU[2][2]; storU2[5*4 + 0] = *(float2*)&regU[3][0]; storU2[5*4 + 2] = *(float2*)&regU[3][2]; float2* readU2 = &shrU2[tid]; float2 u[8]; for (int i = 0; i < 8; i++) u[i] = readU2[i*32]; // Tree reduce for (int j = 4; j > 0; j >>= 1) for (int i = 0; i < j; i++) { u[i].x = u[i].x + u[i+j].x; u[i].y = u[i].y + u[i+j].y; } float2 u2 = *(float2*)u; float2 b2 = to_float(t2); //alpha *= gate; u2.x = alpha*u2.x + 
beta*b2.x; u2.y = alpha*u2.y + beta*b2.y; store(U, u2, offset); } template <typename TX, typename TE, typename TU> __global__ void __launch_bounds__(32) gemm_blocksparse_gated_08x64x08x4_updat( struct Plist<TX,8> X, struct Plist<TE,8> E, const int2* __restrict__ Lut, const float* __restrict__ Gate, TU* U, int params8, int N, int loops, float alpha, float beta) { __shared__ float shrX1[64*8 + 2*16]; // add padding for bank-conflict-free stores __shared__ float shrE1[64*8 + 2*16]; float2* shrU2 = (float2*)shrX1; float2* shrX2 = (float2*)shrX1; float2* shrE2 = (float2*)shrE1; int tid = threadIdx.x; int bid = blockIdx.x; float gate = Gate[bid]; int2 lut_head = Lut[bid]; int tid15 = tid & 15; int tid16 = tid >> 4; int tid1 = tid & 1; int tid2 = (tid >> 1) & 1; int tid4 = tid >> 2; // avoid bank conflicts when writing transpose (+tid15*2) int istore = tid15*8*4 + tid16 + tid15*2; float* storX = &shrX1[istore]; float* storE = &shrE1[istore]; // 4 threads read blocks of 8x8 each shifted over by 4 (2 shifts of 2 from store) int iread = tid4*4*8 + tid4*2; float2* readX2 = &shrX2[iread + tid2]; float2* readE2 = &shrE2[iread + tid1]; int N4 = N >> 2; int N2 = N4 << 1; int idx_X = lut_head.x; int idx_E = lut_head.y; int offsetX = (idx_X*8 + tid16)*N4; int offsetE = (idx_E*8 + tid16)*N4; // exit kernel if gate is zero asm ("{\n\t" ".reg .pred p; \n\t" "setp.eq.ftz.f32 p, %0, 0f00000000; \n\t" "@p exit; \n\t" "}" :: "f"(gate)); float regX[4]; float regE[4]; float regU[4][4]; //[x][e] for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] = 0; int p = 0; #pragma unroll 1 do { int n = tid15; const TX* X0; const TE* E0; asm("{\n\t" ".reg .u64 X, E;\n\t" # if __CUDA_ARCH__ >= 700 "ld.param.u64 X, [%2 + 0x160];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x1a0];\n\t" // WARNING: hard coded param offset. # else "ld.param.u64 X, [%2 + 0x140];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x180];\n\t" // WARNING: hard coded param offset. # endif "cvta.to.global.u64 %0, X;\n\t" "cvta.to.global.u64 %1, E;\n\t" "}" : "=l"(X0), "=l"(E0) : "r"(p)); p += 8; X0 += offsetX; E0 += offsetE; const TX* X2 = X0 + N2; const TX* X4 = X2 + N2; const TX* X6 = X4 + N2; const TE* E2 = E0 + N2; const TE* E4 = E2 + N2; const TE* E6 = E4 + N2; #pragma unroll 1 for (int i = 0; i < loops; i++) { bool bn = n < N4; TX x0, x2, x4, x6; TE e0, e2, e4, e6; ew_zero(x0); ew_zero(x2); ew_zero(x4); ew_zero(x6); ew_zero(e0); ew_zero(e2); ew_zero(e4); ew_zero(e6); if (bn) { x0 = __ldg(X0+n); x2 = __ldg(X2+n); x4 = __ldg(X4+n); x6 = __ldg(X6+n); e0 = __ldg(E0+n); e2 = __ldg(E2+n); e4 = __ldg(E4+n); e6 = __ldg(E6+n); } n += 16; // Convert to float if needed and store to shared as transpose. 
float4 fx0 = to_float(x0); float4 fx2 = to_float(x2); float4 fx4 = to_float(x4); float4 fx6 = to_float(x6); storX[0*8 + 0] = fx0.x; storX[1*8 + 0] = fx0.y; storX[2*8 + 0] = fx0.z; storX[3*8 + 0] = fx0.w; storX[0*8 + 2] = fx2.x; storX[1*8 + 2] = fx2.y; storX[2*8 + 2] = fx2.z; storX[3*8 + 2] = fx2.w; storX[0*8 + 4] = fx4.x; storX[1*8 + 4] = fx4.y; storX[2*8 + 4] = fx4.z; storX[3*8 + 4] = fx4.w; storX[0*8 + 6] = fx6.x; storX[1*8 + 6] = fx6.y; storX[2*8 + 6] = fx6.z; storX[3*8 + 6] = fx6.w; float4 fe0 = to_float(e0); float4 fe2 = to_float(e2); float4 fe4 = to_float(e4); float4 fe6 = to_float(e6); storE[0*8 + 0] = fe0.x; storE[1*8 + 0] = fe0.y; storE[2*8 + 0] = fe0.z; storE[3*8 + 0] = fe0.w; storE[0*8 + 2] = fe2.x; storE[1*8 + 2] = fe2.y; storE[2*8 + 2] = fe2.z; storE[3*8 + 2] = fe2.w; storE[0*8 + 4] = fe4.x; storE[1*8 + 4] = fe4.y; storE[2*8 + 4] = fe4.z; storE[3*8 + 4] = fe4.w; storE[0*8 + 6] = fe6.x; storE[1*8 + 6] = fe6.y; storE[2*8 + 6] = fe6.z; storE[3*8 + 6] = fe6.w; #pragma unroll for (int j = 0; j < 8; j++) { // shift over 2 floats every 4 rows *(float2*)&regX[0] = readX2[4*j + 0 + (j>>2)]; *(float2*)&regE[0] = readE2[4*j + 0 + (j>>2)]; *(float2*)&regX[2] = readX2[4*j + 2 + (j>>2)]; *(float2*)&regE[2] = readE2[4*j + 2 + (j>>2)]; for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] += regX[x] * regE[e]; } } } while (p < params8); tid = threadIdx.x; bid = blockIdx.x; int offset = bid*32 + tid; TU t2; ew_zero(t2); if (beta != 0.0f) t2 = U[offset]; tid1 = tid & 1; tid2 = (tid >> 1) & 1; tid4 = (tid & -4) << 3; float2* storU2 = &shrU2[tid4 + tid2*4*2 + tid1]; storU2[0*4 + 0] = *(float2*)&regU[0][0]; storU2[0*4 + 2] = *(float2*)&regU[0][2]; storU2[1*4 + 0] = *(float2*)&regU[1][0]; storU2[1*4 + 2] = *(float2*)&regU[1][2]; storU2[4*4 + 0] = *(float2*)&regU[2][0]; storU2[4*4 + 2] = *(float2*)&regU[2][2]; storU2[5*4 + 0] = *(float2*)&regU[3][0]; storU2[5*4 + 2] = *(float2*)&regU[3][2]; float2* readU2 = &shrU2[tid]; float2 u[8]; for (int i = 0; i < 8; i++) u[i] = readU2[i*32]; // Tree reduce for (int j = 4; j > 0; j >>= 1) for (int i = 0; i < j; i++) { u[i].x = u[i].x + u[i+j].x; u[i].y = u[i].y + u[i+j].y; } float2 u2 = *(float2*)u; float2 b2 = to_float(t2); //alpha *= gate; u2.x = alpha*u2.x + beta*b2.x; u2.y = alpha*u2.y + beta*b2.y; store(U, u2, offset); } template <bool Fprop, CTYPE(T)> hipError_t BsmmGatedXprop_CN(const T* X, const T* W, T* Y, bsmm_params* params) { dim3 grid(CEIL_DIV(params->N, 64), params->segments, 1); // printf("grid: %d %d\n", grid.x, grid.y); const int2* L2 = (const int2*)params->Lut; const T2* W2 = (const T2*)W; const T4* X4 = (const T4*)X; const T8* X8 = (const T8*)X; T2* Y2 = ( T2*)Y; T8* Y8 = ( T8*)Y; if (params->locks > 0) hipMemsetAsync((hipDeviceptr_t)params->Lock, 0, grid.x * params->locks * 2, params->stream); if (params->bsize == 8) { if (sizeof(T) == 2 && (params->N & 7) == 0) hipLaunchKernelGGL(( gemm_blocksparse_gated_08x64x08x8_xprop<Fprop,T2,T8,T8>), dim3(grid),dim3(32),params->shared*2,params->stream, L2, params->Gate, W2, X8, Y8, params->Lock, params->locks, params->N>>3); else hipLaunchKernelGGL(( gemm_blocksparse_gated_08x64x08x4_xprop<Fprop,T2,T4,T2>), dim3(grid),dim3(32),params->shared*2,params->stream, L2, params->Gate, W2, X4, Y2, params->Lock, params->locks, params->N); } return hipPeekAtLastError(); } template hipError_t BsmmGatedXprop_CN<true, VTYPE(float)>(const float* X, const float* W, float* Y, bsmm_params* params); template hipError_t BsmmGatedXprop_CN<true, VTYPE(ehalf)>(const ehalf* X, const ehalf* W, ehalf* Y, 
bsmm_params* params); template hipError_t BsmmGatedXprop_CN<true, VTYPE(bhalf)>(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params); template hipError_t BsmmGatedXprop_CN<false, VTYPE(float)>(const float* X, const float* W, float* Y, bsmm_params* params); template hipError_t BsmmGatedXprop_CN<false, VTYPE(ehalf)>(const ehalf* X, const ehalf* W, ehalf* Y, bsmm_params* params); template hipError_t BsmmGatedXprop_CN<false, VTYPE(bhalf)>(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params); template <CTYPE(T)> hipError_t BsmmGatedUpdat_CN(const T* X, const T* E, T* U, bsmm_params* params) { dim3 grid(params->blocks, 1, 1); int loops = CEIL_DIV(params->N, 64); struct Plist<T4,8>* X4 = (struct Plist<T4,8>*)X; struct Plist<T4,8>* E4 = (struct Plist<T4,8>*)E; struct Plist<T8,8>* X8 = (struct Plist<T8,8>*)X; struct Plist<T8,8>* E8 = (struct Plist<T8,8>*)E; const int2* L2 = (const int2*)params->Lut; T2* U2 = ( T2*)U; if (params->bsize == 8) { // If not accumulating zero out the buffer if (params->beta == 0.0f) hipMemsetD8Async((hipDeviceptr_t)U, 0, params->blocks * 64 * sizeof(T), params->stream); if (sizeof(T) == 2 && (params->N & 7) == 0) hipLaunchKernelGGL(( gemm_blocksparse_gated_08x64x08x8_updat<T8,T8,T2>), dim3(grid),dim3(32),0,params->stream, *X8, *E8, L2, params->Gate, U2, params->pcount*8, params->N, loops, params->alpha, params->beta); else hipLaunchKernelGGL(( gemm_blocksparse_gated_08x64x08x4_updat<T4,T4,T2>), dim3(grid),dim3(32),0,params->stream, *X4, *E4, L2, params->Gate, U2, params->pcount*8, params->N, loops, params->alpha, params->beta); } return hipPeekAtLastError(); } template hipError_t BsmmGatedUpdat_CN<VTYPE(float)>(const float* X, const float* E, float* U, bsmm_params* params); template hipError_t BsmmGatedUpdat_CN<VTYPE(ehalf)>(const ehalf* X, const ehalf* E, ehalf* U, bsmm_params* params); template hipError_t BsmmGatedUpdat_CN<VTYPE(bhalf)>(const bhalf* X, const bhalf* E, bhalf* U, bsmm_params* params); #endif // GOOGLE_CUDA
b44bb1e0c38b6b47d2c88ed2c3848fb78c72761d.cu
#if GOOGLE_CUDA #include "ew_op_gpu.h" #include <stdio.h> typedef struct __align__(16) LutEntry { int offsetX; int offsetW; float gate; float unused; } LutEntry; template <bool Fprop, typename TW, typename TX, typename TY> __global__ void __launch_bounds__(32) gemm_blocksparse_gated_08x64x08x8_xprop( const int2* __restrict__ Lut, const float* __restrict__ Gate, const TW* __restrict__ W, const TX* __restrict__ X, TY* Y, int* Lock, int locks, int N /* N is in units of groups of 8 elements each (N/8) */) { if (Fprop) asm(".shared .align 16 .b32 share[576];" ::); // 576 = 8*8 + 64*8 else asm(".shared .align 16 .b32 share[608];" ::); // 608 = 12*8 + 64*8 extern __shared__ LutEntry Lut4_s[]; LutEntry* Lut4s = &Lut4_s[Fprop ? 576/4 : 608/4]; int tid = threadIdx.x; int idx_N = blockIdx.x; int idx_L = blockIdx.y; int4 lut_head = ((const int4*)Lut)[idx_L]; int tid7 = tid & 7; int tid8 = tid >> 3; int tid16 = tid & 16; int readXs = ((tid >> 1) & 7) << 4; int readWs = (tid & 1) << 4; // second half of warp starts 4 rows down readXs += tid16 << 6; // 64*4*4 readWs += tid16 << 3; // 8*4*4 int storXs = (tid8*64 + tid7*4) << 2; int storWs; if (Fprop) storWs = tid << 3; else { // Transpose weights on store to shared // Avoid bank conflicts by shifting writes over by 8 every 2 rows (+tid3*8) int tid3 = tid & 3; int tid4 = tid >> 2; storWs = (tid3*8*2 + tid4 + tid3*8) << 2; readWs += tid16 << 2; // shift over 8 floats every 2 rows, second half of warp starts 4 rows down } int n = idx_N*8 + tid7; bool bn = n < N; int offsetX = (tid8*N + n)*8*2; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int idx_Lock = lut_head.w; int N8 = N*8*8*2; // 8 lines, 8 elements per index, two bytes per element uint dep_thd_mask = 0xffffffff; dep_thd_mask >>= 32 - tid; int new_lut_size = 0; // prefetch the lut data into shared Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) { LutEntry entry; *(int2*)&entry = Lut[i]; entry.gate = Gate[entry.offsetW]; // only add the entry to the lut if the gate is non-zero // compiler is stupid about reusing predicate here so use asm uint ballot, warp_non_zero; asm volatile ("{\n\t" ".reg .pred p; \n\t" ".reg .u32 ballot; \n\t" "setp.ne.ftz.f32 p, %2, 0f00000000; \n\t" # if CUDA_VERSION >= 9020 "vote.sync.ballot.b32 ballot, p, 0xffffffff;\n\t" # else "vote.ballot.b32 ballot, p; \n\t" # endif "mov.b32 %0, ballot; \n\t" "popc.b32 %1, ballot; \n\t" "@!p bra GATE_ZERO; \n\t" "}" : "=r"(ballot), "=r"(warp_non_zero) : "f"(entry.gate)); { uint dep_thd_cnt = __popc(dep_thd_mask & ballot); entry.unused = 0; entry.offsetX *= N8; // 8 lines of N per block entry.offsetW *= 64*2; // 64 entries of W per block, 2 bytes each Lut4s[new_lut_size + dep_thd_cnt] = entry; } asm volatile ("\nGATE_ZERO:\n" ::); new_lut_size += warp_non_zero; } //lut_size = new_lut_size; # if CUDA_VERSION >= 9020 asm volatile ("shfl.sync.idx.b32 %0, %1, 0, 0x1f, 0xffffffff;" : "=r"(lut_size) : "r"(new_lut_size)); # else asm volatile ("shfl.idx.b32 %0, %1, 0, 0x1f;" : "=r"(lut_size) : "r"(new_lut_size)); # endif // zero accumulation registers float regY[4][8]; for (int w = 0; w < 4; w++) for (int x = 0; x < 8; x++) // use asm here to ensure this happens after lut table construction and before main loop skipping asm volatile ("mov.b32 %0, 0;" : "=f"(regY[w][x]) :); // skip loop if empty lut // Compiler generates suboptimal code if a simple "for loop" is used. 
asm volatile (".reg .pred lut_zero; \n\t" "setp.eq.u32 lut_zero, %0, 0; \n\t" "@lut_zero bra.uni END_LOOP;" :: "r"(lut_size)); int i = 0; do { LutEntry entry = Lut4s[i++]; entry.offsetX += offsetX; entry.offsetW += tid*4; const TW* W0; const TX* X0; const TX* X4; // Simplify pointer arithmatic by letting compiler assume all offsets fit in 32 bits. asm("{\n\t" ".reg .u64 x0, x4, w0;\n\t" "mov.b64 w0, {%5, 0};\n\t" "mov.b64 x0, {%6, 0};\n\t" "mov.b64 x4, {%7, 0};\n\t" "add.u64 %0, w0, %3;\n\t" "add.u64 %1, x0, %4;\n\t" "add.u64 %2, x4, %4;\n\t" "}" : "=l"(W0),"=l"(X0),"=l"(X4) : "l"(W), "l"(X), "r"(entry.offsetW), "r"(entry.offsetX), "r"(entry.offsetX + N*4*8*2) ); // Fetch 8 rows at a time from W and X float2 w0 = load(W0, 0); float8 x0 = load(X0, 0, bn); float8 x4 = load(X4, 0, bn); w0 = ew_mul(w0, entry.gate); // store to shared. if (Fprop) st_shared_v2(storWs + 64*8*4, w0); else { // transpose the shared store of W st_shared_v1(storWs + 0*4 + 64*8*4, w0.x); st_shared_v1(storWs + 8*4 + 64*8*4, w0.y); } st_shared_v4(storXs + (0*64 + 0*32)*4, x0.a); st_shared_v4(storXs + (0*64 + 1*32)*4, x0.b); st_shared_v4(storXs + (4*64 + 0*32)*4, x4.a); st_shared_v4(storXs + (4*64 + 1*32)*4, x4.b); // computes an 8x64x8 gemm tile with 4x8 register blocking float regW[4]; float regX[8]; #pragma unroll for (int j = 0; j < 4; j++) { // fetch outer product data ld_shared_v4(readWs + ( 8*j + 64*8 + (Fprop ? 0 : (j>>1)*8))*4, regW ); // shift over 8 floats every 2 rows ld_shared_v4(readXs + (64*j + 0)*4, &regX[0] ); ld_shared_v4(readXs + (64*j + 32)*4, &regX[4] ); // accumulate outer product for (int w = 0; w < 4; w++) for (int x = 0; x < 8; x++) regY[w][x] += regW[w] * regX[x]; } } while (i < lut_size); asm volatile ("\nEND_LOOP:\n":: ); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_N) :); int tidN = (tid >> 1) & 7; int tidK = tid & 1; int tidk = tid >> 4; bool t16 = tid16 != 0; float outY[2][8]; for (int w = 0; w < 2; w++) { for (int x = 0; x < 8; x++) { float swap = t16 ? regY[2*w + 0][x] : regY[2*w + 1][x]; outY[w][x] = t16 ? 
regY[2*w + 1][x] : regY[2*w + 0][x]; outY[w][x] += shfl_xor(swap, 16); } } n = idx_N*64/8 + tidN; bn = n < N; Y += (idx_K*8 + tidK*4 + tidk)*N + n; if (idx_Lock == 0) { // no lock needed just write out the results store(Y, *(float8*)outY[0], N*0, bn); store(Y, *(float8*)outY[1], N*2, bn); } else { int offsetL = idx_N*locks + idx_Lock - 1; Lock += offsetL; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncwarp(); int offsetC = locks*gridDim.x; int* Count = Lock + offsetC; int count = *Count; __syncwarp(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory store(Y, *(float8*)outY[0], N*0, bn); store(Y, *(float8*)outY[1], N*2, bn); __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { // subsequent blocks must accumulate float8 y0 = load_c(Y, N*0, bn); float8 y2 = load_c(Y, N*2, bn); y0 = ew_add(y0, *(float8*)outY[0]); y2 = ew_add(y2, *(float8*)outY[1]); store(Y, y0, N*0, bn); store(Y, y2, N*2, bn); __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } template <bool Fprop, typename TW, typename TX, typename TY> __global__ void __launch_bounds__(32) gemm_blocksparse_gated_08x64x08x4_xprop( const int2* __restrict__ Lut, const float* __restrict__ Gate, const TW* __restrict__ W, const TX* __restrict__ X, TY* Y, int* Lock, int locks, int N) { __shared__ float shrX1[64*8]; __shared__ float shrW1[Fprop ? 8*8 : 12*8]; float2* shrW2 = (float2*)shrW1; float2* shrX2 = (float2*)shrX1; float4* shrX4 = (float4*)shrX1; extern __shared__ LutEntry Lut4s[]; int tid = threadIdx.x; int idx_N = blockIdx.x; int idx_L = blockIdx.y; int4 lut_head = ((const int4*)Lut)[idx_L]; int tid15 = tid & 15; int tid16 = tid >> 4; float2* storW2; float* storW1; if (Fprop) storW2 = &shrW2[tid]; else { int tid3 = tid & 3; int tid4 = tid >> 2; // Transpose weights on store to shared // Avoid bank conflicts by shifting writes over by 8 every 2 rows (+tid3*8) storW1 = &shrW1[tid3*16 + tid4 + tid3*8]; } float4* storX4 = &shrX4[tid]; // float2* readX2 = &shrX2[tid15]; // float2* readW2 = &shrW2[tid16]; float2* readX2 = &shrX2[tid >> 1]; float2* readW2 = &shrW2[tid & 1]; int N4 = N >> 2; int N2 = N4 << 1; int N8 = N4 << 3; int n4 = idx_N*16 + tid15; bool bn = n4 < N4; const TX* X0 = X + tid16*N4 + n4; const TX* X2 = X0 + N2; const TX* X4 = X2 + N2; const TX* X6 = X4 + N2; const TW* W0 = W + tid; // unpack lut header int lut_offset = lut_head.x; int lut_size = lut_head.y; int idx_K = lut_head.z; int idx_Lock = lut_head.w; //printf("%d %2d %d\n", idx_K, tid, lut_size); uint dep_thd_mask = 0xffffffff; dep_thd_mask >>= 32 - tid; int new_lut_size = 0; // prefetch the lut data into shared Lut += lut_offset; #pragma unroll 1 for (int i = tid; i < lut_size; i += 32) { //printf("%d %2d %d %d %d\n", idx_K, tid, i, lut_size, new_lut_size); LutEntry entry; *(int2*)&entry = Lut[i]; entry.gate = Gate[entry.offsetW]; // only add the entry to the lut if the gate is non-zero bool gate_non_zero = entry.gate != 0.0f; //uint gate_ballot = __ballot_sync(0xffffffff, gate_non_zero); uint gate_ballot = __ballot(gate_non_zero); uint warp_non_zero = __popc(gate_ballot); if (gate_non_zero) { uint dep_thd_cnt = __popc(dep_thd_mask & gate_ballot); entry.unused = 0; entry.offsetX *= N8; entry.offsetW *= 32; Lut4s[new_lut_size + dep_thd_cnt] = entry; } new_lut_size += warp_non_zero; } // lut_size = new_lut_size; // # if CUDA_VERSION >= 9020 // lut_size = __shfl_sync(0xffffffff, new_lut_size, 0, 32); // # 
else lut_size = __shfl(new_lut_size, 0, 32); // # endif //printf("%d %2d %d\n", idx_K, tid, lut_size); // zero accumulation registers float regY[4][4]; for (int w = 0; w < 4; w++) for (int x = 0; x < 4; x++) asm volatile ("mov.b32 %0, 0;" : "=f"(regY[w][x]) :); // skip loop if empty lut // Compiler generates suboptimal code if a simple "for loop" is used. asm volatile (".reg .pred lut_zero; \n\t" "setp.eq.u32 lut_zero, %0, 0; \n\t" "@lut_zero bra.uni END_LOOP;" :: "r"(lut_size)); // loop over each lut entry to compute a gemm block int i = 0; #pragma unroll 1 do { LutEntry entry = Lut4s[i++]; // Fetch 8 rows at a time from W and X TW w0; TX x0, x2, x4, x6; w0 = W0[entry.offsetW]; if (bn) { x0 = X0[entry.offsetX]; x2 = X2[entry.offsetX]; x4 = X4[entry.offsetX]; x6 = X6[entry.offsetX]; } // Convert to float if needed and store to shared. if (Fprop) storW2[0] = ew_mul(to_float(w0), entry.gate); else { // transpose the shared store of W float2 w2 = ew_mul(to_float(w0), entry.gate); storW1[0] = w2.x; storW1[8] = w2.y; } storX4[0*16] = to_float(x0); storX4[2*16] = to_float(x2); storX4[4*16] = to_float(x4); storX4[6*16] = to_float(x6); float regX[4]; float regW[4]; // computes an 8x64x8 gemm block #pragma unroll for (int j = 0; j < 8; j++) { // fetch outer product data *(float2*)&regX[0] = readX2[32*j + 0]; *(float2*)&regW[0] = readW2[ 4*j + 0 + (Fprop ? 0 : (j>>1)*4)]; // shift over 8 floats every 2 rows *(float2*)&regX[2] = readX2[32*j + 16]; *(float2*)&regW[2] = readW2[ 4*j + 2 + (Fprop ? 0 : (j>>1)*4)]; // accumulate outer product for (int w = 0; w < 4; w++) for (int x = 0; x < 4; x++) regY[w][x] += regW[w] * regX[x]; } } while (i < lut_size); asm volatile ("\nEND_LOOP:\n":: ); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid ) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_N) :); // tid = threadIdx.x; // idx_N = blockIdx.x; tid15 = tid >> 1; tid16 = tid & 1; N2 = N >> 1; int n = idx_N*32 + tid15; int yi[4]; yi[0] = (idx_K*8 + tid16*2)*N2 + n; yi[1] = yi[0] + N2; yi[2] = yi[0] + N2*4; yi[3] = yi[2] + N2; //printf("K:%2d N:%d tid:%2d t15:%2d t16:%2d N2:%2d n:%2d yi:%d\n", idx_K, idx_N, tid, tid15, tid16, N2, n, yi[0]); bool bn0 = n+0 < N2; bool bn16 = n+16 < N2; if (idx_Lock == 0) { // no lock needed just write out the results for (int i = 0; i < 4; i++) { store(Y, *(float2*)&regY[i][0], yi[i]+0, bn0 ); store(Y, *(float2*)&regY[i][2], yi[i]+16, bn16); } } else { int offsetL = idx_N*locks + idx_Lock - 1; Lock += offsetL; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncwarp(); int offsetC = locks*gridDim.x; int* Count = Lock + offsetC; int count = *Count; __syncwarp(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (int i = 0; i < 4; i++) { store(Y, *(float2*)&regY[i][0], yi[i]+0, bn0 ); store(Y, *(float2*)&regY[i][2], yi[i]+16, bn16); } __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { // subsequent blocks must accumulate float2 y[8]; for (int i = 0; i < 4; i++) { y[i + 0] = load_c(Y, yi[i]+0, bn0 ); y[i + 4] = load_c(Y, yi[i]+16, bn16); y[i + 0].x += regY[i][0]; y[i + 0].y += regY[i][1]; y[i + 4].x += regY[i][2]; y[i + 4].y += regY[i][3]; } for (int i = 0; i < 4; i++) { store(Y, y[i + 0], yi[i]+0, bn0 ); store(Y, y[i + 4], yi[i]+16, bn16); } __threadfence(); __syncwarp(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } template <typename TX, typename TE, typename TU> __global__ void __launch_bounds__(32) 
gemm_blocksparse_gated_08x64x08x8_updat( struct Plist<TX,8> X, struct Plist<TE,8> E, const int2* __restrict__ Lut, const float* __restrict__ Gate, TU* U, int params8, int N, int loops, float alpha, float beta) { // add padding for bank-conflict-free stores //__shared__ float2 shrU2[64*8 + 4*8]; // add padding for bank-conflict-free stores asm(".shared .align 16 .b32 share[1088];" ::); // 1088 = (64*8 + 4*8)*2 extern __shared__ float2 shrU2[]; int tid = threadIdx.x; int bid = blockIdx.x; float gate = Gate[bid]; int2 lut_head = Lut[bid]; int tid7 = tid & 7; int tid8 = tid >> 3; int tid1 = tid & 1; int tid2 = (tid >> 1) & 1; int tid4 = tid >> 2; // avoid bank conflicts when writing transpose (+tid7*4) int storeS = tid7*8*8 + tid8 + tid7*4; // 4 threads read blocks of 8x8 each shifted over by 4 int iread = tid4*8*8 + tid4*4; int readXs = iread + tid2*2; int readEs = iread + tid1*2 + 64*8 + 4*8; int idx_X = lut_head.x; int idx_E = lut_head.y; int offsetX = (idx_X*8 + tid8)*N + tid7*8; int offsetE = (idx_E*8 + tid8)*N + tid7*8; int N4 = N; // exit kernel if gate is zero asm ("{\n\t" ".reg .pred p; \n\t" "setp.eq.ftz.f32 p, %0, 0f00000000; \n\t" "@p exit; \n\t" "}" :: "f"(gate)); // This keeps all prior logic outside of the loops. asm("shl.b32 %0, %0, 3;" : "+r"(N4) : ); asm("shl.b32 %0, %0, 2;" : "+r"(storeS) : ); asm("shl.b32 %0, %0, 2;" : "+r"(readXs) : ); asm("shl.b32 %0, %0, 2;" : "+r"(readEs) : ); asm("shl.b32 %0, %0, 1;" : "+r"(offsetX) : ); asm("shl.b32 %0, %0, 1;" : "+r"(offsetE) : ); float regU[4][4]; //[x][e] for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] = 0; int p = 0; #pragma unroll 1 do { const TX* X0; const TE* E0; asm("{\n\t" ".reg .u64 X, E, offsetX, offsetE;\n\t" # if __CUDA_ARCH__ >= 700 "ld.param.u64 X, [%2 + 0x160];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x1a0];\n\t" // WARNING: hard coded param offset. # else "ld.param.u64 X, [%2 + 0x140];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x180];\n\t" // WARNING: hard coded param offset. # endif "cvta.to.global.u64 X, X;\n\t" "cvta.to.global.u64 E, E;\n\t" "mov.b64 offsetX, {%3, 0};\n\t" "mov.b64 offsetE, {%4, 0};\n\t" "add.u64 %0, X, offsetX;\n\t" "add.u64 %1, E, offsetE;\n\t" "}" : "=l"(X0), "=l"(E0) : "r"(p), "r"(offsetX), "r"(offsetE)); p += 8; int n = (tid & 7) << 3; int loop = 0; #pragma unroll 1 do { const TX* X4; const TE* E4; asm("{\n\t" ".reg .u64 N4;\n\t" "mov.b64 N4, {%4, 0};\n\t" "add.u64 %0, N4, %2;\n\t" "add.u64 %1, N4, %3;\n\t" "}" : "=l"(X4),"=l"(E4) : "l"(X0), "l"(E0), "r"(N4) ); TX x0, x4; TE e0, e4; ew_zero(x0); ew_zero(x4); ew_zero(e0); ew_zero(e4); if (n < N) { x0 = __ldg(X0); x4 = __ldg(X4); e0 = __ldg(E0); e4 = __ldg(E4); } // Convert to float if needed and store to shared as transpose. 
float8 fx0 = to_float(x0); float8 fx4 = to_float(x4); // advance pointer by 64*2 asm ("add.u64 %0, %0, 128;" : "+l"(X0)); n += 64; st_shared_v1(storeS + (0*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.x); st_shared_v1(storeS + (1*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.y); st_shared_v1(storeS + (2*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.z); st_shared_v1(storeS + (3*8 + 0 + (64*8 + 4*8)*0)*4, fx0.a.w); st_shared_v1(storeS + (4*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.x); st_shared_v1(storeS + (5*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.y); st_shared_v1(storeS + (6*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.z); st_shared_v1(storeS + (7*8 + 0 + (64*8 + 4*8)*0)*4, fx0.b.w); st_shared_v1(storeS + (0*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.x); st_shared_v1(storeS + (1*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.y); st_shared_v1(storeS + (2*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.z); st_shared_v1(storeS + (3*8 + 4 + (64*8 + 4*8)*0)*4, fx4.a.w); st_shared_v1(storeS + (4*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.x); st_shared_v1(storeS + (5*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.y); st_shared_v1(storeS + (6*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.z); st_shared_v1(storeS + (7*8 + 4 + (64*8 + 4*8)*0)*4, fx4.b.w); float8 fe0 = to_float(e0); float8 fe4 = to_float(e4); // advance pointer by 64*2 asm ("add.u64 %0, %0, 128;" : "+l"(E0)); st_shared_v1(storeS + (0*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.x); st_shared_v1(storeS + (1*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.y); st_shared_v1(storeS + (2*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.z); st_shared_v1(storeS + (3*8 + 0 + (64*8 + 4*8)*1)*4, fe0.a.w); st_shared_v1(storeS + (4*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.x); st_shared_v1(storeS + (5*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.y); st_shared_v1(storeS + (6*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.z); st_shared_v1(storeS + (7*8 + 0 + (64*8 + 4*8)*1)*4, fe0.b.w); st_shared_v1(storeS + (0*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.x); st_shared_v1(storeS + (1*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.y); st_shared_v1(storeS + (2*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.z); st_shared_v1(storeS + (3*8 + 4 + (64*8 + 4*8)*1)*4, fe4.a.w); st_shared_v1(storeS + (4*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.x); st_shared_v1(storeS + (5*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.y); st_shared_v1(storeS + (6*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.z); st_shared_v1(storeS + (7*8 + 4 + (64*8 + 4*8)*1)*4, fe4.b.w); float regX[4]; float regE[4]; #pragma unroll for (int j = 0; j < 8; j++) { // fetch outer product data ld_shared_v2(readXs + (8*j + 0)*4, &regX[0] ); ld_shared_v2(readEs + (8*j + 0)*4, &regE[0] ); ld_shared_v2(readXs + (8*j + 4)*4, &regX[2] ); ld_shared_v2(readEs + (8*j + 4)*4, &regE[2] ); for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] += regX[x] * regE[e]; } loop++; } while (loop < loops); } while (p < params8); tid = threadIdx.x; bid = blockIdx.x; int offset = bid*32 + tid; TU t2; ew_zero(t2); if (beta != 0.0f) t2 = U[offset]; tid1 = tid & 1; tid2 = (tid >> 1) & 1; tid4 = (tid & -4) << 3; float2* storU2 = &shrU2[tid4 + tid2*4*2 + tid1]; storU2[0*4 + 0] = *(float2*)&regU[0][0]; storU2[0*4 + 2] = *(float2*)&regU[0][2]; storU2[1*4 + 0] = *(float2*)&regU[1][0]; storU2[1*4 + 2] = *(float2*)&regU[1][2]; storU2[4*4 + 0] = *(float2*)&regU[2][0]; storU2[4*4 + 2] = *(float2*)&regU[2][2]; storU2[5*4 + 0] = *(float2*)&regU[3][0]; storU2[5*4 + 2] = *(float2*)&regU[3][2]; float2* readU2 = &shrU2[tid]; float2 u[8]; for (int i = 0; i < 8; i++) u[i] = readU2[i*32]; // Tree reduce for (int j = 4; j > 0; j >>= 1) for (int i = 0; i < j; i++) { u[i].x = u[i].x + u[i+j].x; u[i].y = u[i].y + u[i+j].y; } float2 u2 = *(float2*)u; float2 b2 = to_float(t2); //alpha *= gate; u2.x = alpha*u2.x + 
beta*b2.x; u2.y = alpha*u2.y + beta*b2.y; store(U, u2, offset); } template <typename TX, typename TE, typename TU> __global__ void __launch_bounds__(32) gemm_blocksparse_gated_08x64x08x4_updat( struct Plist<TX,8> X, struct Plist<TE,8> E, const int2* __restrict__ Lut, const float* __restrict__ Gate, TU* U, int params8, int N, int loops, float alpha, float beta) { __shared__ float shrX1[64*8 + 2*16]; // add padding for bank-conflict-free stores __shared__ float shrE1[64*8 + 2*16]; float2* shrU2 = (float2*)shrX1; float2* shrX2 = (float2*)shrX1; float2* shrE2 = (float2*)shrE1; int tid = threadIdx.x; int bid = blockIdx.x; float gate = Gate[bid]; int2 lut_head = Lut[bid]; int tid15 = tid & 15; int tid16 = tid >> 4; int tid1 = tid & 1; int tid2 = (tid >> 1) & 1; int tid4 = tid >> 2; // avoid bank conflicts when writing transpose (+tid15*2) int istore = tid15*8*4 + tid16 + tid15*2; float* storX = &shrX1[istore]; float* storE = &shrE1[istore]; // 4 threads read blocks of 8x8 each shifted over by 4 (2 shifts of 2 from store) int iread = tid4*4*8 + tid4*2; float2* readX2 = &shrX2[iread + tid2]; float2* readE2 = &shrE2[iread + tid1]; int N4 = N >> 2; int N2 = N4 << 1; int idx_X = lut_head.x; int idx_E = lut_head.y; int offsetX = (idx_X*8 + tid16)*N4; int offsetE = (idx_E*8 + tid16)*N4; // exit kernel if gate is zero asm ("{\n\t" ".reg .pred p; \n\t" "setp.eq.ftz.f32 p, %0, 0f00000000; \n\t" "@p exit; \n\t" "}" :: "f"(gate)); float regX[4]; float regE[4]; float regU[4][4]; //[x][e] for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] = 0; int p = 0; #pragma unroll 1 do { int n = tid15; const TX* X0; const TE* E0; asm("{\n\t" ".reg .u64 X, E;\n\t" # if __CUDA_ARCH__ >= 700 "ld.param.u64 X, [%2 + 0x160];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x1a0];\n\t" // WARNING: hard coded param offset. # else "ld.param.u64 X, [%2 + 0x140];\n\t" // WARNING: hard coded param offset. "ld.param.u64 E, [%2 + 0x180];\n\t" // WARNING: hard coded param offset. # endif "cvta.to.global.u64 %0, X;\n\t" "cvta.to.global.u64 %1, E;\n\t" "}" : "=l"(X0), "=l"(E0) : "r"(p)); p += 8; X0 += offsetX; E0 += offsetE; const TX* X2 = X0 + N2; const TX* X4 = X2 + N2; const TX* X6 = X4 + N2; const TE* E2 = E0 + N2; const TE* E4 = E2 + N2; const TE* E6 = E4 + N2; #pragma unroll 1 for (int i = 0; i < loops; i++) { bool bn = n < N4; TX x0, x2, x4, x6; TE e0, e2, e4, e6; ew_zero(x0); ew_zero(x2); ew_zero(x4); ew_zero(x6); ew_zero(e0); ew_zero(e2); ew_zero(e4); ew_zero(e6); if (bn) { x0 = __ldg(X0+n); x2 = __ldg(X2+n); x4 = __ldg(X4+n); x6 = __ldg(X6+n); e0 = __ldg(E0+n); e2 = __ldg(E2+n); e4 = __ldg(E4+n); e6 = __ldg(E6+n); } n += 16; // Convert to float if needed and store to shared as transpose. 
float4 fx0 = to_float(x0); float4 fx2 = to_float(x2); float4 fx4 = to_float(x4); float4 fx6 = to_float(x6); storX[0*8 + 0] = fx0.x; storX[1*8 + 0] = fx0.y; storX[2*8 + 0] = fx0.z; storX[3*8 + 0] = fx0.w; storX[0*8 + 2] = fx2.x; storX[1*8 + 2] = fx2.y; storX[2*8 + 2] = fx2.z; storX[3*8 + 2] = fx2.w; storX[0*8 + 4] = fx4.x; storX[1*8 + 4] = fx4.y; storX[2*8 + 4] = fx4.z; storX[3*8 + 4] = fx4.w; storX[0*8 + 6] = fx6.x; storX[1*8 + 6] = fx6.y; storX[2*8 + 6] = fx6.z; storX[3*8 + 6] = fx6.w; float4 fe0 = to_float(e0); float4 fe2 = to_float(e2); float4 fe4 = to_float(e4); float4 fe6 = to_float(e6); storE[0*8 + 0] = fe0.x; storE[1*8 + 0] = fe0.y; storE[2*8 + 0] = fe0.z; storE[3*8 + 0] = fe0.w; storE[0*8 + 2] = fe2.x; storE[1*8 + 2] = fe2.y; storE[2*8 + 2] = fe2.z; storE[3*8 + 2] = fe2.w; storE[0*8 + 4] = fe4.x; storE[1*8 + 4] = fe4.y; storE[2*8 + 4] = fe4.z; storE[3*8 + 4] = fe4.w; storE[0*8 + 6] = fe6.x; storE[1*8 + 6] = fe6.y; storE[2*8 + 6] = fe6.z; storE[3*8 + 6] = fe6.w; #pragma unroll for (int j = 0; j < 8; j++) { // shift over 2 floats every 4 rows *(float2*)&regX[0] = readX2[4*j + 0 + (j>>2)]; *(float2*)&regE[0] = readE2[4*j + 0 + (j>>2)]; *(float2*)&regX[2] = readX2[4*j + 2 + (j>>2)]; *(float2*)&regE[2] = readE2[4*j + 2 + (j>>2)]; for (int x = 0; x < 4; x++) for (int e = 0; e < 4; e++) regU[x][e] += regX[x] * regE[e]; } } } while (p < params8); tid = threadIdx.x; bid = blockIdx.x; int offset = bid*32 + tid; TU t2; ew_zero(t2); if (beta != 0.0f) t2 = U[offset]; tid1 = tid & 1; tid2 = (tid >> 1) & 1; tid4 = (tid & -4) << 3; float2* storU2 = &shrU2[tid4 + tid2*4*2 + tid1]; storU2[0*4 + 0] = *(float2*)&regU[0][0]; storU2[0*4 + 2] = *(float2*)&regU[0][2]; storU2[1*4 + 0] = *(float2*)&regU[1][0]; storU2[1*4 + 2] = *(float2*)&regU[1][2]; storU2[4*4 + 0] = *(float2*)&regU[2][0]; storU2[4*4 + 2] = *(float2*)&regU[2][2]; storU2[5*4 + 0] = *(float2*)&regU[3][0]; storU2[5*4 + 2] = *(float2*)&regU[3][2]; float2* readU2 = &shrU2[tid]; float2 u[8]; for (int i = 0; i < 8; i++) u[i] = readU2[i*32]; // Tree reduce for (int j = 4; j > 0; j >>= 1) for (int i = 0; i < j; i++) { u[i].x = u[i].x + u[i+j].x; u[i].y = u[i].y + u[i+j].y; } float2 u2 = *(float2*)u; float2 b2 = to_float(t2); //alpha *= gate; u2.x = alpha*u2.x + beta*b2.x; u2.y = alpha*u2.y + beta*b2.y; store(U, u2, offset); } template <bool Fprop, CTYPE(T)> cudaError_t BsmmGatedXprop_CN(const T* X, const T* W, T* Y, bsmm_params* params) { dim3 grid(CEIL_DIV(params->N, 64), params->segments, 1); // printf("grid: %d %d\n", grid.x, grid.y); const int2* L2 = (const int2*)params->Lut; const T2* W2 = (const T2*)W; const T4* X4 = (const T4*)X; const T8* X8 = (const T8*)X; T2* Y2 = ( T2*)Y; T8* Y8 = ( T8*)Y; if (params->locks > 0) cuMemsetD32Async((CUdeviceptr)params->Lock, 0, grid.x * params->locks * 2, params->stream); if (params->bsize == 8) { if (sizeof(T) == 2 && (params->N & 7) == 0) gemm_blocksparse_gated_08x64x08x8_xprop<Fprop,T2,T8,T8><<<grid,32,params->shared*2,params->stream>>>(L2, params->Gate, W2, X8, Y8, params->Lock, params->locks, params->N>>3); else gemm_blocksparse_gated_08x64x08x4_xprop<Fprop,T2,T4,T2><<<grid,32,params->shared*2,params->stream>>>(L2, params->Gate, W2, X4, Y2, params->Lock, params->locks, params->N); } return cudaPeekAtLastError(); } template cudaError_t BsmmGatedXprop_CN<true, VTYPE(float)>(const float* X, const float* W, float* Y, bsmm_params* params); template cudaError_t BsmmGatedXprop_CN<true, VTYPE(ehalf)>(const ehalf* X, const ehalf* W, ehalf* Y, bsmm_params* params); template cudaError_t BsmmGatedXprop_CN<true, 
VTYPE(bhalf)>(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params); template cudaError_t BsmmGatedXprop_CN<false, VTYPE(float)>(const float* X, const float* W, float* Y, bsmm_params* params); template cudaError_t BsmmGatedXprop_CN<false, VTYPE(ehalf)>(const ehalf* X, const ehalf* W, ehalf* Y, bsmm_params* params); template cudaError_t BsmmGatedXprop_CN<false, VTYPE(bhalf)>(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params); template <CTYPE(T)> cudaError_t BsmmGatedUpdat_CN(const T* X, const T* E, T* U, bsmm_params* params) { dim3 grid(params->blocks, 1, 1); int loops = CEIL_DIV(params->N, 64); struct Plist<T4,8>* X4 = (struct Plist<T4,8>*)X; struct Plist<T4,8>* E4 = (struct Plist<T4,8>*)E; struct Plist<T8,8>* X8 = (struct Plist<T8,8>*)X; struct Plist<T8,8>* E8 = (struct Plist<T8,8>*)E; const int2* L2 = (const int2*)params->Lut; T2* U2 = ( T2*)U; if (params->bsize == 8) { // If not accumulating zero out the buffer if (params->beta == 0.0f) cuMemsetD8Async((CUdeviceptr)U, 0, params->blocks * 64 * sizeof(T), params->stream); if (sizeof(T) == 2 && (params->N & 7) == 0) gemm_blocksparse_gated_08x64x08x8_updat<T8,T8,T2><<<grid,32,0,params->stream>>>(*X8, *E8, L2, params->Gate, U2, params->pcount*8, params->N, loops, params->alpha, params->beta); else gemm_blocksparse_gated_08x64x08x4_updat<T4,T4,T2><<<grid,32,0,params->stream>>>(*X4, *E4, L2, params->Gate, U2, params->pcount*8, params->N, loops, params->alpha, params->beta); } return cudaPeekAtLastError(); } template cudaError_t BsmmGatedUpdat_CN<VTYPE(float)>(const float* X, const float* E, float* U, bsmm_params* params); template cudaError_t BsmmGatedUpdat_CN<VTYPE(ehalf)>(const ehalf* X, const ehalf* E, ehalf* U, bsmm_params* params); template cudaError_t BsmmGatedUpdat_CN<VTYPE(bhalf)>(const bhalf* X, const bhalf* E, bhalf* U, bsmm_params* params); #endif // GOOGLE_CUDA
b709e6eeaa9fcf9137f06912eae6b40d155e7f30.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ComplexToCASFilter.h"

__global__ void ComplexToCASKernel(hipfftComplex *ComplexInput, float *CASOutput, int imgSize, int nSlices)
{
    // CUDA kernel for converting CASImgs to imgs
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Column
    int j = blockIdx.y * blockDim.y + threadIdx.y; // Row

    // Are we outside the bounds of the image?
    if (i >= imgSize || i < 0 || j >= imgSize || j < 0)
    {
        return;
    }

    // Each thread will do all the slices for position X and Y
    for (int k = 0; k < nSlices; k++)
    {
        // CASimgs is the same dimensions as imgs
        int ndx = i + j * imgSize + k * imgSize * imgSize;

        // Summation of the real and imaginary components
        CASOutput[ndx] = ComplexInput[ndx].x + ComplexInput[ndx].y;
    }

    return;
}

void ComplexToCASFilter::UpdateFilter(hipfftComplex *Input, float *Output, hipStream_t *stream)
{
    // Convert a hipfftComplex array to CAS array
    // std::cout << "ComplexToCASFilter()" << '\n';

    // Check the input parameters
    if (this->VolumeSize <= 0)
    {
        std::cerr << "Error ComplexToCASFilter(): Volume size parameter was not set. Please use SetVolumeSize() function first." << '\n';
        return;
    }

    // Flag to specify that we are running a volume or a stack of 2D images
    int NumberSlices;

    // Running a volume if the number of slices is not specified
    if (this->nSlices <= 0)
    {
        NumberSlices = this->VolumeSize;
    }
    else
    {
        // Running a stack of 2D images
        NumberSlices = this->nSlices;
    }

    // Define CUDA kernel launch dimensions
    // Iterate over the X,Y positions for all slices
    int Grid = ceil(double(this->VolumeSize) / double(32));
    Grid = max(Grid, 1); // At least one

    dim3 dimGrid(Grid, Grid, 1);
    dim3 dimBlock(32, 32, 1); // i.e. 32*32 threads

    // std::cout << "ComplexToCASFilter()..." << '\n';
    // std::cout << "VolumeSize: " << VolumeSize << '\n';
    // std::cout << "NumberSlices: " << NumberSlices << '\n';
    // std::cout << "this->nSlices: " << this->nSlices << '\n';
    // std::cout << "Grid: " << Grid << '\n';

    // Use the CUDA stream if one was provided
    if (stream != NULL)
    {
        hipLaunchKernelGGL((ComplexToCASKernel), dim3(dimGrid), dim3(dimBlock), 0, *stream, Input, Output, this->VolumeSize, NumberSlices);
    }
    else
    {
        hipLaunchKernelGGL((ComplexToCASKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Input, Output, this->VolumeSize, NumberSlices);
    }

    gpuErrorCheck(hipPeekAtLastError());

    return;
};
b709e6eeaa9fcf9137f06912eae6b40d155e7f30.cu
#include "ComplexToCASFilter.h" __global__ void ComplexToCASKernel(cufftComplex *ComplexInput, float *CASOutput, int imgSize, int nSlices) { // CUDA kernel for converting CASImgs to imgs int i = blockIdx.x * blockDim.x + threadIdx.x; // Column int j = blockIdx.y * blockDim.y + threadIdx.y; // Row // Are we outside the bounds of the image? if (i >= imgSize || i < 0 || j >= imgSize || j < 0) { return; } // Each thread will do all the slices for position X and Y for (int k = 0; k < nSlices; k++) { // CASimgs is the same dimensions as imgs int ndx = i + j * imgSize + k * imgSize * imgSize; // Summation of the real and imaginary components CASOutput[ndx] = ComplexInput[ndx].x + ComplexInput[ndx].y; } return; } void ComplexToCASFilter::UpdateFilter(cufftComplex *Input, float *Output, cudaStream_t *stream) { // Convert a cufftComplex array to CAS array // std::cout << "ComplexToCASFilter()" << '\n'; // Check the input parameters if (this->VolumeSize <= 0) { std::cerr << "Error ComplexToCASFilter(): Volume size parameter was not set. Please use SetVolumeSize() function first." << '\n'; return; } // Flag to specify that we are running a volume or a stack of 2D images int NumberSlices; // Running a volume if the number of slices is not specified if (this->nSlices <= 0) { NumberSlices = this->VolumeSize; } else { // Running a stack of 2D images NumberSlices = this->nSlices; } // Define CUDA kernel launch dimensions // Iterate over the X,Y positions for all slices int Grid = ceil(double(this->VolumeSize) / double(32)); Grid = max(Grid, 1); // At least one dim3 dimGrid(Grid, Grid, 1); dim3 dimBlock(32, 32, 1); // i.e. 32*32 threads // std::cout << "ComplexToCASFilter()..." << '\n'; // std::cout << "VolumeSize: " << VolumeSize << '\n'; // std::cout << "NumberSlices: " << NumberSlices << '\n'; // std::cout << "this->nSlices: " << this->nSlices << '\n'; // std::cout << "Grid: " << Grid << '\n'; // Use the CUDA stream if one was provided if (stream != NULL) { ComplexToCASKernel<<<dimGrid, dimBlock, 0, *stream>>>(Input, Output, this->VolumeSize, NumberSlices); } else { ComplexToCASKernel<<<dimGrid, dimBlock>>>(Input, Output, this->VolumeSize, NumberSlices); } gpuErrorCheck(cudaPeekAtLastError()); return; };
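A minimal host-side usage sketch for ComplexToCASKernel, assuming it is compiled in the same translation unit as the kernel; buffer names and the cleanup are illustrative. It reproduces the launch shape used by ComplexToCASFilter::UpdateFilter: one 32x32 thread block per 32x32 tile of the X/Y plane, with each thread looping over every slice and writing the CAS value (real part plus imaginary part) of its element.

#include <cufft.h>
#include <cuda_runtime.h>

// Illustrative driver; VolumeSize and nSlices stand in for the filter's configured values.
void RunComplexToCASExample(int VolumeSize, int nSlices)
{
    size_t n = (size_t)VolumeSize * VolumeSize * nSlices;

    cufftComplex *d_in = NULL;
    float *d_out = NULL;
    cudaMalloc((void **)&d_in,  n * sizeof(cufftComplex));
    cudaMalloc((void **)&d_out, n * sizeof(float));

    // Same grid/block computation as UpdateFilter above.
    int Grid = (VolumeSize + 31) / 32;
    dim3 dimGrid(Grid, Grid, 1);
    dim3 dimBlock(32, 32, 1);

    ComplexToCASKernel<<<dimGrid, dimBlock>>>(d_in, d_out, VolumeSize, nSlices);
    cudaDeviceSynchronize();

    cudaFree(d_in);
    cudaFree(d_out);
}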
88069ca8be83aacbb8031a6171a7f7949a4aa94c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
#include "LCRNG.cuh"

__global__ void rebase_yield_kernel(unsigned N, unsigned RNUM_PER_THREAD, float keff, unsigned* rn_bank, unsigned* yield){

	int tid = threadIdx.x+blockIdx.x*blockDim.x;
	if (tid >= N){return;}
	if (yield[tid]==0){return;}
	//if(done[tid]){return;}

	unsigned this_yield = yield[tid];
	unsigned rn = rn_bank[tid];
	float new_yield = (float)this_yield / keff;
	unsigned i_new_yield = (unsigned) new_yield;
	float rn1 = get_rand(&rn);

	if((float)i_new_yield+rn1 < new_yield){
		this_yield = i_new_yield+1;
	}
	else{
		this_yield = i_new_yield;
	}

	//printf("%u %6.4E %6.4E %u %u %6.4E\n",yield[tid],keff,new_yield,i_new_yield,this_yield,rn1);

	yield[tid] = this_yield;
	rn_bank[tid]= rn;
}

void rebase_yield( unsigned NUM_THREADS, unsigned RNUM_PER_THREAD, unsigned N, float keff, unsigned* rn_bank, unsigned* yield){

	unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;

	hipLaunchKernelGGL(( rebase_yield_kernel) , dim3(blks), dim3(NUM_THREADS) , 0, 0, N, RNUM_PER_THREAD, keff, rn_bank, yield);
	hipDeviceSynchronize();
}
88069ca8be83aacbb8031a6171a7f7949a4aa94c.cu
#include <cuda.h>
#include <stdio.h>
#include "datadef.h"
#include "LCRNG.cuh"

__global__ void rebase_yield_kernel(unsigned N, unsigned RNUM_PER_THREAD, float keff, unsigned* rn_bank, unsigned* yield){

	int tid = threadIdx.x+blockIdx.x*blockDim.x;
	if (tid >= N){return;}
	if (yield[tid]==0){return;}
	//if(done[tid]){return;}

	unsigned this_yield = yield[tid];
	unsigned rn = rn_bank[tid];
	float new_yield = (float)this_yield / keff;
	unsigned i_new_yield = (unsigned) new_yield;
	float rn1 = get_rand(&rn);

	if((float)i_new_yield+rn1 < new_yield){
		this_yield = i_new_yield+1;
	}
	else{
		this_yield = i_new_yield;
	}

	//printf("%u %6.4E %6.4E %u %u %6.4E\n",yield[tid],keff,new_yield,i_new_yield,this_yield,rn1);

	yield[tid] = this_yield;
	rn_bank[tid]= rn;
}

void rebase_yield( unsigned NUM_THREADS, unsigned RNUM_PER_THREAD, unsigned N, float keff, unsigned* rn_bank, unsigned* yield){

	unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;

	rebase_yield_kernel <<< blks, NUM_THREADS >>> ( N, RNUM_PER_THREAD, keff, rn_bank, yield);
	cudaThreadSynchronize();
}
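The kernel above implements expectation-preserving stochastic rounding of the rescaled yield: yield/keff is rounded up with probability equal to its fractional part, so the expected rounded value equals yield/keff exactly. For example, yield = 3 and keff = 1.2 give 2.5, which becomes 3 when the random draw is below 0.5 and 2 otherwise. A host-side reference of the same rule (rand01 is assumed to be a uniform sample in [0,1), standing in for get_rand):

// Reference of the per-particle rounding performed by rebase_yield_kernel.
unsigned rebase_one_yield(unsigned yield, float keff, float rand01)
{
    float new_yield      = (float)yield / keff;
    unsigned i_new_yield = (unsigned)new_yield;              // truncate toward zero
    // Round up with probability equal to the fractional part of new_yield.
    return ((float)i_new_yield + rand01 < new_yield) ? i_new_yield + 1 : i_new_yield;
}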
ff5fd9cc440654b88e1beb2edf8518fc3c6de619.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sd_t_s1_1_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3, double *t2_d, double *v2_d,size_t p4, size_t total_x, double* t3d) { size_t h1,h2,h3,p6; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); }
ff5fd9cc440654b88e1beb2edf8518fc3c6de619.cu
#include "includes.h" __global__ void sd_t_s1_1_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3, double *t2_d, double *v2_d,size_t p4, size_t total_x, double* t3d) { size_t h1,h2,h3,p6; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); }
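The kernel above assigns one (h3, h2, p6) triple per flat work index, with h3 varying fastest and p6 slowest; a small host-side sketch of that modulo/divide decomposition (function name illustrative, not part of the source):

// Decode a flat index into (h3, h2, p6) the same way sd_t_s1_1_kernel does.
void sd_t_decode_index(size_t flat, size_t h3d, size_t h2d, size_t p6d,
                       size_t *h3, size_t *h2, size_t *p6)
{
    *h3 = flat % h3d;  flat /= h3d;
    *h2 = flat % h2d;  flat /= h2d;
    *p6 = flat % p6d;
}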
53d6d3699688ce398fd6260e9c239cb4a329bd94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: points(b, c, n) idx(b, m) // output: out(b, c, m) __global__ void gather_points_kernel(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; } } } } void gather_points_kernel_wrapper(int b, int c, int n, int npoints, const float *points, const int *idx, float *out) { hipLaunchKernelGGL(( gather_points_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), b, c, n, npoints, points, idx, out); CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, m) idx(b, m) // output: grad_points(b, c, n) __global__ void gather_points_grad_kernel(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; atomicAdd(grad_points + (i * c + l) * n + a, grad_out[(i * c + l) * m + j]); } } } } void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points) { hipLaunchKernelGGL(( gather_points_grad_kernel), dim3(dim3(b, c, 1)), dim3(opt_n_threads(npoints)), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), b, c, n, npoints, grad_out, idx, grad_points); CUDA_CHECK_ERRORS(); } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } // Input dataset: (b, n, 3), tmp: (b, n) // Ouput idxs (b, m) template <unsigned int block_size> __global__ void furthest_point_sampling_kernel( int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); if (mag <= 1e-3) continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? 
d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_kernel_wrapper(int b, int n, int m, const float *dataset, float *temp, int *idxs) { unsigned int n_threads = opt_n_threads(n); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); switch (n_threads) { case 512: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 256: hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 128: hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 64: hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 32: hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 16: hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 8: hipLaunchKernelGGL(( furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 4: hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 2: hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, stream, b, n, m, dataset, temp, idxs); } CUDA_CHECK_ERRORS(); }
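A CPU reference of gather_points_kernel for a single batch, with the points/idx/out pointers assumed to be pre-offset to that batch; it makes the indexing explicit: out[l][j] = points[l][idx[j]] for every channel l and sampled index j.

// Reference gather for one batch: points is (c, n), idx is (m,), out is (c, m).
void gather_points_ref(int c, int n, int m,
                       const float *points, const int *idx, float *out)
{
    for (int l = 0; l < c; ++l)
        for (int j = 0; j < m; ++j)
            out[l * m + j] = points[l * n + idx[j]];
}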
53d6d3699688ce398fd6260e9c239cb4a329bd94.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" // input: points(b, c, n) idx(b, m) // output: out(b, c, m) __global__ void gather_points_kernel(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; } } } } void gather_points_kernel_wrapper(int b, int c, int n, int npoints, const float *points, const int *idx, float *out) { gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0, at::cuda::getCurrentCUDAStream()>>>( b, c, n, npoints, points, idx, out); CUDA_CHECK_ERRORS(); } // input: grad_out(b, c, m) idx(b, m) // output: grad_points(b, c, n) __global__ void gather_points_grad_kernel(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { for (int i = blockIdx.x; i < b; i += gridDim.x) { for (int l = blockIdx.y; l < c; l += gridDim.y) { for (int j = threadIdx.x; j < m; j += blockDim.x) { int a = idx[i * m + j]; atomicAdd(grad_points + (i * c + l) * n + a, grad_out[(i * c + l) * m + j]); } } } } void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points) { gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0, at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints, grad_out, idx, grad_points); CUDA_CHECK_ERRORS(); } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } // Input dataset: (b, n, 3), tmp: (b, n) // Ouput idxs (b, m) template <unsigned int block_size> __global__ void furthest_point_sampling_kernel( int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); if (mag <= 1e-3) continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? 
d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_kernel_wrapper(int b, int n, int m, const float *dataset, float *temp, int *idxs) { unsigned int n_threads = opt_n_threads(n); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); switch (n_threads) { case 512: furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 256: furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 128: furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 64: furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 32: furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 16: furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 8: furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 4: furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 2: furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; case 1: furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); break; default: furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>( b, n, m, dataset, temp, idxs); } CUDA_CHECK_ERRORS(); }
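A CPU reference of what furthest_point_sampling_kernel computes for one batch, useful for checking results on small inputs. It is a sketch only: the device version additionally seeds its running distances from the caller-provided temp buffer and skips points whose squared norm is at most 1e-3, which this reference does not reproduce.

#include <algorithm>
#include <limits>
#include <vector>

// Greedy farthest point sampling: start at point 0, then repeatedly pick the point
// whose minimum squared distance to the already-selected set is largest.
std::vector<int> furthest_point_sampling_ref(const std::vector<float> &xyz, int n, int m)
{
    std::vector<int> idxs(m, 0);                         // idxs[0] = 0, as in the kernel
    std::vector<float> dist(n, std::numeric_limits<float>::max());
    int old = 0;
    for (int j = 1; j < m; ++j) {
        int besti = 0;
        float best = -1.0f;
        float x1 = xyz[old * 3], y1 = xyz[old * 3 + 1], z1 = xyz[old * 3 + 2];
        for (int k = 0; k < n; ++k) {
            float dx = xyz[k * 3] - x1, dy = xyz[k * 3 + 1] - y1, dz = xyz[k * 3 + 2] - z1;
            float d = dx * dx + dy * dy + dz * dz;
            dist[k] = std::min(dist[k], d);              // distance to the selected set
            if (dist[k] > best) { best = dist[k]; besti = k; }
        }
        old = besti;
        idxs[j] = old;
    }
    return idxs;
}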
13b1af149f3566e04309a803eefcf2e2ee9533b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/Exceptions.h> #include <THH/THHTensorMathReduce.cuh> #include <math.h> #include "ATen/native/Distance.h" namespace at { namespace native { namespace { static const int forward_threads = 256; template <typename scalar_t> static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val); template <> __forceinline__ __device__ float device_sqrt(float val) { return ::sqrtf(val); } template <> __forceinline__ __device__ double device_sqrt(double val) { return ::sqrt(val); } template <typename scalar_t> struct dists { static __forceinline__ __device__ scalar_t sign(scalar_t val) { return (0 < val) - (val < 0); } // Zero norm struct zero { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } }; // One norm struct one { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); } }; // Special case backward when p is less than two struct lt_two { static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * ::pow(std::abs(diff), p - 1) * grad / ::pow(dist, p - 1); } }; // Two norm struct two { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; } }; // General p norm struct p { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += ::pow(diff, p); } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return ::pow(agg, static_cast<scalar_t>(1) / p); } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 
0 : diff * ::pow(std::abs(diff), p - 2) * grad / ::pow(dist, p - 1); } }; // Inf norm struct inf { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); } }; }; template <typename scalar_t, typename F> __global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p, const double n2, const double n2_squared_minus_1) { const int k = blockIdx.x; const int stride = blockDim.x; // The -1 accounts for floating point truncation issues int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; const scalar_t * const start = self + i * m; const scalar_t * const end = start + m; const scalar_t * a = start + threadIdx.x; const scalar_t * b = self + j * m + threadIdx.x; scalar_t agg = 0.0; for (; a < end; a += stride, b += stride) { F::inc(agg, std::abs(*a - *b), p); } // Reduce warps for (int offset = warpSize / 2; offset > 0; offset /= 2) { F::agg(agg, WARP_SHFL_DOWN(agg, offset)); } // Reduce block // This shared memory is significantly larger than necessary, but the // assumption is that it's not a bottleneck, and this is simple __shared__ scalar_t shared[forward_threads]; int lane = threadIdx.x % warpSize; int warp_id = threadIdx.x / warpSize; if (lane == 0) { shared[warp_id] = agg; } __syncthreads(); agg = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0.0; if (warp_id == 0) { // Only reduce theads with nonzero data for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) { F::agg(agg, WARP_SHFL_DOWN(agg, offset)); } } if (threadIdx.x == 0) { result[k] = F::finish(agg, p); } } template <typename scalar_t, typename F> __global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p, const double n2, const double n2_squared_minus_1) { const int k = blockIdx.y * blockDim.y + threadIdx.y; const int init = blockIdx.x * blockDim.x + threadIdx.x; const int stride = blockDim.x * gridDim.x; if (k >= combs) { return; } // The -1 accounts for floating point truncation issues int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const scalar_t grad_k = grad[k * gs]; const scalar_t dist_k = dist[k]; const scalar_t * const start = self + i * m; const scalar_t * const end = start + m; const scalar_t * self_i = start + init; const scalar_t * self_j = self + j * m + init; scalar_t * buff_i = buffer + (ib * n + i) * m + init; scalar_t * buff_j = buffer + (jb * n + j) * m + init; for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) { const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p); *buff_i = res; *buff_j = -res; } } void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) { const dim3 grid(result.numel()); const dim3 block(forward_threads); int64_t n = self.size(0); int64_t m = self.size(1); // https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do // some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device. const double n2 = n - .5; const double n2_squared_minus_1 = n2 * n2 - 1; AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda", [&] { if (p == 0.0) { hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else if (p == 1.0) { hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else if (p == 2.0) { hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else if (std::isinf(p)) { hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else { hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } }); AT_CUDA_CHECK(hipGetLastError()); } void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) { if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) { result.fill_(0); return; } const int64_t n = result.size(0); int64_t m = self.size(1); const int block_x = 64; // NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16. 
// From binary search, block_y of 16 gives us max pdist dim0 of 1449, // block_y of 4 gives us max pdist dim0 of 725. const int block_y = 16; const int grid_x = (m + block_x * 8 - 1) / (block_x * 8); const int grid_y = (dist.numel() + block_y - 1) / block_y; const dim3 grid(grid_x, grid_y); const dim3 block(block_x, block_y); // https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do // some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device. const double n2 = n - .5; const double n2_squared_minus_1 = n2 * n2 - 1; Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options()); AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda_backward", [&] { if (p == 1.0) { hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else if (p < 2.0) { hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else if (p == 2.0) { hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else if (std::isinf(p)) { hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else { hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } }); AT_CUDA_CHECK(hipGetLastError()); at::sum_out(result, buffer, 0); } } // anonymous namespace REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl); REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl); }} // at::native
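The block reduction in pdist_kernel_cuda_impl first folds each warp with WARP_SHFL_DOWN, presumably a thin portability wrapper around the CUDA shuffle intrinsic, then combines the per-warp results through shared memory. A generic sketch of the warp-level step written directly against __shfl_down_sync (CUDA 9 or newer):

// Warp-wide sum using the same halving-offset pattern as the pdist kernel;
// after the loop, lane 0 of each warp holds the sum over its 32 lanes.
__device__ float warp_reduce_sum(float val)
{
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}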
13b1af149f3566e04309a803eefcf2e2ee9533b9.cu
#include <ATen/ATen.h> #include <ATen/cuda/Exceptions.h> #include <THC/THCTensorMathReduce.cuh> #include <math.h> #include "ATen/native/Distance.h" namespace at { namespace native { namespace { static const int forward_threads = 256; template <typename scalar_t> static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val); template <> __forceinline__ __device__ float device_sqrt(float val) { return ::sqrtf(val); } template <> __forceinline__ __device__ double device_sqrt(double val) { return ::sqrt(val); } template <typename scalar_t> struct dists { static __forceinline__ __device__ scalar_t sign(scalar_t val) { return (0 < val) - (val < 0); } // Zero norm struct zero { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } }; // One norm struct one { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); } }; // Special case backward when p is less than two struct lt_two { static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * std::pow(std::abs(diff), p - 1) * grad / std::pow(dist, p - 1); } }; // Two norm struct two { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; } }; // General p norm struct p { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += std::pow(diff, p); } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return std::pow(agg, static_cast<scalar_t>(1) / p); } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 
0 : diff * std::pow(std::abs(diff), p - 2) * grad / std::pow(dist, p - 1); } }; // Inf norm struct inf { static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } } static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; } static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } } static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); } }; }; template <typename scalar_t, typename F> __global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p, const double n2, const double n2_squared_minus_1) { const int k = blockIdx.x; const int stride = blockDim.x; // The -1 accounts for floating point truncation issues int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; const scalar_t * const start = self + i * m; const scalar_t * const end = start + m; const scalar_t * a = start + threadIdx.x; const scalar_t * b = self + j * m + threadIdx.x; scalar_t agg = 0.0; for (; a < end; a += stride, b += stride) { F::inc(agg, std::abs(*a - *b), p); } // Reduce warps for (int offset = warpSize / 2; offset > 0; offset /= 2) { F::agg(agg, WARP_SHFL_DOWN(agg, offset)); } // Reduce block // This shared memory is significantly larger than necessary, but the // assumption is that it's not a bottleneck, and this is simple __shared__ scalar_t shared[forward_threads]; int lane = threadIdx.x % warpSize; int warp_id = threadIdx.x / warpSize; if (lane == 0) { shared[warp_id] = agg; } __syncthreads(); agg = (threadIdx.x < blockDim.x / warpSize) ? 
shared[lane] : 0.0; if (warp_id == 0) { // Only reduce theads with nonzero data for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) { F::agg(agg, WARP_SHFL_DOWN(agg, offset)); } } if (threadIdx.x == 0) { result[k] = F::finish(agg, p); } } template <typename scalar_t, typename F> __global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p, const double n2, const double n2_squared_minus_1) { const int k = blockIdx.y * blockDim.y + threadIdx.y; const int init = blockIdx.x * blockDim.x + threadIdx.x; const int stride = blockDim.x * gridDim.x; if (k >= combs) { return; } // The -1 accounts for floating point truncation issues int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const scalar_t grad_k = grad[k * gs]; const scalar_t dist_k = dist[k]; const scalar_t * const start = self + i * m; const scalar_t * const end = start + m; const scalar_t * self_i = start + init; const scalar_t * self_j = self + j * m + init; scalar_t * buff_i = buffer + (ib * n + i) * m + init; scalar_t * buff_j = buffer + (jb * n + j) * m + init; for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) { const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p); *buff_i = res; *buff_j = -res; } } void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) { const dim3 grid(result.numel()); const dim3 block(forward_threads); int64_t n = self.size(0); int64_t m = self.size(1); // https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do // some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device. const double n2 = n - .5; const double n2_squared_minus_1 = n2 * n2 - 1; AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda", [&] { if (p == 0.0) { pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else if (p == 1.0) { pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else if (p == 2.0) { pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else if (std::isinf(p)) { pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } else { pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1); } }); AT_CUDA_CHECK(cudaGetLastError()); } void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) { if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) { result.fill_(0); return; } const int64_t n = result.size(0); int64_t m = self.size(1); const int block_x = 64; // NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16. // From binary search, block_y of 16 gives us max pdist dim0 of 1449, // block_y of 4 gives us max pdist dim0 of 725. 
const int block_y = 16; const int grid_x = (m + block_x * 8 - 1) / (block_x * 8); const int grid_y = (dist.numel() + block_y - 1) / block_y; const dim3 grid(grid_x, grid_y); const dim3 block(block_x, block_y); // https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do // some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device. const double n2 = n - .5; const double n2_squared_minus_1 = n2 * n2 - 1; Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options()); AT_DISPATCH_FLOATING_TYPES(self.type(), "pdist_cuda_backward", [&] { if (p == 1.0) { pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else if (p < 2.0) { pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else if (p == 2.0) { pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else if (std::isinf(p)) { pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } else { pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1); } }); AT_CUDA_CHECK(cudaGetLastError()); at::sum_out(result, buffer, 0); } } // anonymous namespace REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl); REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl); }} // at::native
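Both pdist kernels recover the pair (i, j) from the flat index k of a strictly upper-triangular n x n matrix using a closed form evaluated in fp64 (the -1 term guards against truncation, as the source comment notes). A host-side sketch that checks the mapping against a direct enumeration; the function name is illustrative, and since the check relies on fp64 accuracy it is intended for moderate n.

#include <cassert>
#include <cmath>
#include <cstdint>

// Enumerate all pairs i < j in row-major order and verify the kernels' inverse mapping.
void check_pdist_index_map(int64_t n)
{
    const double n2 = n - 0.5;
    const double n2_squared_minus_1 = n2 * n2 - 1;
    int64_t k = 0;
    for (int64_t i = 0; i < n; ++i) {
        for (int64_t j = i + 1; j < n; ++j, ++k) {
            int64_t ik = static_cast<int64_t>(n2 - std::sqrt(n2_squared_minus_1 - 2.0 * k));
            int64_t jk = k - n * ik + ik * (ik + 1) / 2 + ik + 1;
            assert(ik == i && jk == j);
        }
    }
}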
21ae3c56b7f3eadf3f09e9f346f5748fd1bf20b3.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <common_functions.h> #include <hip/hip_runtime_api.h> #include <hip/driver_types.h> #include <host_defines.h> #include <stdio.h> #include <sys/time.h> #include <iostream> #include <ostream> //Param #define PRINT false //Running params #define CPU_MULT true #define CUDA_MULT true #define CUDA_MULT_TRANSPOSE true #define CUDA_MULT_SHARED true #define CUDA_MULT_SHARED_TRANSPOSE true //Matrix params #define MATRIX_SET_1 false #define MATRIX_SIZE 1000 #define MATRIX_TYPE float const int TILE_WIDTH = 16; //Macros #define ELEM(row,column,rowSize) (column+row*rowSize) /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. * * See cuda.h for error code descriptions. */ #define CHECK_CUDA_RESULT(N) { \ hipError_t result = N; \ if (result != 0) { \ printf("CUDA call on line %d returned error %d\n", __LINE__, \ result); \ exit(1); \ } \ } using namespace std; struct timeval tv1, tv2; double diff; __host__ void printMatrix(MATRIX_TYPE* matrix, int rowCount, int columnCount) { for (int row = 0; row < rowCount; ++row) { for (int column = 0; column < columnCount; ++column) { cout << matrix[ELEM(row, column, rowCount)] << " "; } cout << endl; } } __host__ MATRIX_TYPE * createMatrixTransposta(int rowCount, int columnCount, int initValue, bool colValue) { MATRIX_TYPE * matrix = (MATRIX_TYPE*) malloc( sizeof(MATRIX_TYPE) * rowCount * columnCount); for (int row = 0; row < rowCount; ++row) { for (int column = 0; column < columnCount; ++column) { matrix[ELEM(row, column, rowCount)] = colValue ? column + 1 : initValue; } } return matrix; } __host__ MATRIX_TYPE * createMatrix(int rowCount, int columnCount, int initValue, bool rowValue) { MATRIX_TYPE * matrix = (MATRIX_TYPE*) malloc( sizeof(MATRIX_TYPE) * rowCount * columnCount); for (int row = 0; row < rowCount; ++row) { for (int column = 0; column < columnCount; ++column) { matrix[ELEM(row, column, rowCount)] = rowValue ? 
row + 1 : initValue; } } return matrix; } __host__ MATRIX_TYPE * createMatrix(int rowCount, int columnCount) { return createMatrix(rowCount, columnCount, 1, true); } __host__ MATRIX_TYPE * createMatrix(int rowCount, int columnCount, int initValue) { return createMatrix(rowCount, columnCount, initValue, false); } __global__ void matrixCompare(bool* result, MATRIX_TYPE* matrixA, MATRIX_TYPE * matrixB, int matrixSize) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (col < matrixSize && row < matrixSize) { result[ELEM(row, col, matrixSize)] = (matrixA[ELEM(row, col, matrixSize)] == matrixA[ELEM(row, col, matrixSize)]); } } __host__ void multMatrix(MATRIX_TYPE * matrixA, MATRIX_TYPE * matrixB, MATRIX_TYPE * matrixC, int matrixSize) { for (int row = 0; row < matrixSize; ++row) { for (int column = 0; column < matrixSize; ++column) { MATRIX_TYPE sum = 0; for (int k = 0; k < matrixSize; ++k) { sum += matrixA[ELEM(row, k, matrixSize)] * matrixB[ELEM(k, column, matrixSize)]; } matrixC[ELEM(row, column, matrixSize)] = sum; } } } __global__ void multiMatrixCUDA(MATRIX_TYPE * matrixA, MATRIX_TYPE * matrixB, MATRIX_TYPE * matrixC, int matrixSize) { unsigned int column = (blockDim.x * blockIdx.x) + threadIdx.x; unsigned int row = (blockDim.y * blockIdx.y) + threadIdx.y; if (column < matrixSize && row < matrixSize) { MATRIX_TYPE sum = 0; for (int k = 0; k < matrixSize; ++k) { sum += matrixA[ELEM(row, k, matrixSize)] * matrixB[ELEM(k, column, matrixSize)]; } matrixC[ELEM(row, column, matrixSize)] = sum; } } __global__ void matrixTranspose(MATRIX_TYPE * matrix_in, MATRIX_TYPE * matrix_out, int matrixSize) { int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < matrixSize && row < matrixSize) { matrix_out[ELEM(row, column, matrixSize)] = matrix_in[ELEM(column, row, matrixSize)]; } } __global__ void multiMatrixCUDABTranspose(MATRIX_TYPE * matrixA, MATRIX_TYPE * matrixBTransposed, MATRIX_TYPE * matrixC, int matrixSize) { int column = (blockDim.x * blockIdx.x) + threadIdx.x; int row = (blockDim.y * blockIdx.y) + threadIdx.y; if (column < matrixSize && row < matrixSize) { MATRIX_TYPE sum = 0; for (int k = 0; k < matrixSize; ++k) { sum += matrixA[ELEM(row, k, matrixSize)] * matrixBTransposed[ELEM(column, k, matrixSize)]; } matrixC[ELEM(row, column, matrixSize)] = sum; } } __global__ void matMultTileCuda(const float *A, const float *B, float *C, int N){ __shared__ float a_tile[TILE_WIDTH][TILE_WIDTH], b_tile[TILE_WIDTH][TILE_WIDTH]; int qtd_tiles = N/TILE_WIDTH + (N%TILE_WIDTH==0?0:1); int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; int offset; float sum = 0.0; for (int tile_ind = 0; tile_ind < qtd_tiles; ++tile_ind) { offset = tile_ind*TILE_WIDTH; if(i<N && offset+threadIdx.x< N){ a_tile[threadIdx.y][threadIdx.x] = A[ELEM(i, offset+threadIdx.x, N)]; } else{ a_tile[threadIdx.y][threadIdx.x] = 0.0; } if(threadIdx.y+offset<N && j< N){ b_tile[threadIdx.y][threadIdx.x] = B[ELEM(threadIdx.y+offset, j, N)]; } else{ b_tile[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { sum += a_tile[threadIdx.y][k]*b_tile[k][threadIdx.x]; } __syncthreads(); } if(i<N && j<N) C[ELEM(i,j,N)] = sum; } __global__ void multiMatrixCUDAShared(MATRIX_TYPE* matrixA, MATRIX_TYPE* matrixB, MATRIX_TYPE* matrixC, int matrixSize) { __shared__ int matrixSharedA[TILE_WIDTH * TILE_WIDTH]; __shared__ int matrixSharedB[TILE_WIDTH * TILE_WIDTH]; //Row and 
column of element to calculate int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col = blockIdx.x * TILE_WIDTH + threadIdx.x; MATRIX_TYPE sum = 0; int tileCount = ((matrixSize - 1) / TILE_WIDTH) + 1; //Iterate tiles to compute the sum for (int tileIndex = 0; tileIndex < tileCount; ++tileIndex) { int colA = tileIndex * TILE_WIDTH + threadIdx.x; //Collaborative loading of A and B tiles into shared memory if (row < matrixSize && colA < matrixSize) matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixA[row * matrixSize + colA]; else matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; int rowB = tileIndex * TILE_WIDTH + threadIdx.y; if (col < matrixSize && rowB < matrixSize) matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixB[(rowB) * matrixSize + col]; else matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) sum += matrixSharedA[ELEM(threadIdx.y, k, TILE_WIDTH)] * matrixSharedB[ELEM(k, threadIdx.x, TILE_WIDTH)]; __syncthreads(); } if (row < matrixSize && col < matrixSize) { matrixC[ELEM(row, col, matrixSize)] = sum; } } __global__ void multiMatrixCUDASharedTransposed(MATRIX_TYPE* matrixA, MATRIX_TYPE* matrixBTransposed, MATRIX_TYPE* matrixC, int matrixSize) { __shared__ int matrixSharedA[TILE_WIDTH * TILE_WIDTH]; __shared__ int matrixSharedB[TILE_WIDTH * TILE_WIDTH]; //Row and column of element to calculate int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col = blockIdx.x * TILE_WIDTH + threadIdx.x; MATRIX_TYPE sum = 0; int tileCount = ((matrixSize - 1) / TILE_WIDTH) + 1; //Iterate tiles to compute the sum for (int tileIndex = 0; tileIndex < tileCount; ++tileIndex) { int colA = tileIndex * TILE_WIDTH + threadIdx.x; //Collaborative loading of A and B tiles into shared memory if (row < matrixSize && colA < matrixSize) matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixA[row * matrixSize + colA]; else matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; int rowB = tileIndex * TILE_WIDTH + threadIdx.y; if (col < matrixSize && rowB < matrixSize) matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixBTransposed[(col) * matrixSize + rowB]; else matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) sum += matrixSharedA[ELEM(threadIdx.y, k, TILE_WIDTH)] * matrixSharedB[ELEM(k, threadIdx.x, TILE_WIDTH)]; __syncthreads(); } if (row < matrixSize && col < matrixSize) { matrixC[ELEM(row, col, matrixSize)] = sum; } } int getSPcores(hipDeviceProp_t devProp) { int cores = 0; int mp = devProp.multiProcessorCount; switch (devProp.major) { case 2: // Fermi if (devProp.minor == 1) cores = mp * 48; else cores = mp * 32; break; case 3: // Kepler cores = mp * 192; break; case 5: // Maxwell cores = mp * 128; break; case 6: // Pascal if (devProp.minor == 1) cores = mp * 128; else if (devProp.minor == 0) cores = mp * 64; else printf("Unknown device type\n"); break; default: printf("Unknown device type\n"); break; } return cores; } int getBlockSize() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); return prop.major < 2 ? 
16 : 32; } return 16; } void getDeviceInfo() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); cout << "ECC enabled: " << prop.ECCEnabled << endl; cout << "Warp size: " << prop.warpSize << endl; cout << "Shared Memory Per Block: " << prop.sharedMemPerBlock << endl; cout << "Shared Memory Per Multiprocessor: " << prop.sharedMemPerMultiprocessor << endl; cout << "Global Memory : " << prop.totalGlobalMem << endl; cout << "Concurrent Kernels : " << prop.concurrentKernels << endl; cout << "Integrated : " << prop.integrated << endl; cout << "Multiprocessor Count : " << prop.multiProcessorCount << endl; cout << "Cuda cores: " << getSPcores(prop) << endl; cout << "Concurrent Managed Access: " << prop.concurrentManagedAccess << endl; cout << "Max grid Size: " << prop.maxGridSize << endl; cout << "Max thread Dim: " << prop.maxThreadsDim << endl; cout << "Max thread per block: " << prop.maxThreadsPerBlock << endl; cout << "Max thread per multiprocessor: " << prop.maxThreadsPerMultiProcessor << endl; cout << "Active warps: " << prop.maxThreadsPerMultiProcessor / prop.warpSize << endl; } } double diffTime(timeval& tv1, timeval& tv2) { return (double) ((tv2.tv_usec - tv1.tv_usec) / 1000 + (double) (tv2.tv_sec - tv1.tv_sec) * 1000); } void printMatrix(int matrixSize, MATRIX_TYPE* h_matrixC) { gettimeofday(&tv1, NULL); printMatrix(h_matrixC, matrixSize, matrixSize); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "Print time:" << diff << endl; } int main(int argc, char **argv) { //getDeviceInfo(); hipSetDevice(0); int matrixSize = MATRIX_SIZE; hipError_t err; MATRIX_TYPE * h_matrixC_CPU; #if MATRIX_SET_1 == true MATRIX_TYPE * h_matrixA = createMatrix(matrixSize, matrixSize, 1); MATRIX_TYPE * h_matrixB = createMatrix(matrixSize, matrixSize, 1); MATRIX_TYPE * h_matrixC = createMatrix(matrixSize, matrixSize, 0); #else MATRIX_TYPE * h_matrixA = createMatrix(matrixSize, matrixSize); MATRIX_TYPE * h_matrixB = createMatrixTransposta(matrixSize, matrixSize, 1, true); MATRIX_TYPE * h_matrixC = createMatrix(matrixSize, matrixSize, 0); #endif //Set size of blocks and threads //int blockSize = getBlockSize(); int blockSize = TILE_WIDTH; dim3 thread(blockSize, blockSize, 1); int gridSizeX = matrixSize / thread.x; gridSizeX += (matrixSize % thread.x) == 0 ? 0 : 1; int gridSizeY = matrixSize / thread.y; gridSizeY += (matrixSize % thread.y) == 0 ? 
0 : 1; dim3 grid(gridSizeX, gridSizeY, 1); cout << "Grid: " << grid.x << " - " << grid.y << " Thread: " << thread.x << " - " << thread.y << endl; //Alocao das matrizes no dispositivo MATRIX_TYPE * d_matrixA, *d_matrixB, *d_matrixC, *d_matrixBTransposed, *d_matrixC_CPU; hipMalloc((void **) &d_matrixA, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); hipMalloc((void **) &d_matrixB, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); hipMalloc((void **) &d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); //Copia dos dados para o dispositivo hipMemcpy(d_matrixA, h_matrixA, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyHostToDevice); hipMemcpy(d_matrixB, h_matrixB, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyHostToDevice); hipMemcpy(d_matrixC, h_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyHostToDevice); #if PRINT == true cout << "A: " << endl; printMatrix(h_matrixA, matrixSize, matrixSize); cout << "B: " << endl; printMatrix(h_matrixB, matrixSize, matrixSize); #endif #if CPU_MULT == true //CPU MULT h_matrixC_CPU = createMatrix(matrixSize, matrixSize, 0); gettimeofday(&tv1, NULL); multMatrix(h_matrixA, h_matrixB, h_matrixC_CPU, matrixSize); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CPU Mult time: " << diff << endl; hipMalloc((void **) &d_matrixC_CPU, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); hipMemcpy(d_matrixC_CPU, h_matrixC_CPU, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyHostToDevice); #if PRINT == true printMatrix(matrixSize, h_matrixC_CPU); #endif #endif #if CUDA_MULT == true //CUDA MULT gettimeofday(&tv1, NULL); hipLaunchKernelGGL(( multiMatrixCUDA), dim3(grid), dim3(thread), 0, 0, d_matrixA, d_matrixB, d_matrixC, matrixSize); hipDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult time: " << diff << endl; hipMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyDeviceToHost); #if PRINT == true printMatrix(h_matrixC, matrixSize, matrixSize); #endif #endif #if CUDA_MULT_TRANSPOSE == true //CUDA MULT B Transpose hipMalloc((void **) &d_matrixBTransposed, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); hipLaunchKernelGGL(( matrixTranspose), dim3(grid), dim3(thread), 0, 0, d_matrixB, d_matrixBTransposed,matrixSize); hipDeviceSynchronize(); gettimeofday(&tv1, NULL); hipLaunchKernelGGL(( multiMatrixCUDABTranspose), dim3(grid), dim3(thread), 0, 0, d_matrixA, d_matrixBTransposed, d_matrixC, matrixSize); hipDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult B Transpose time: " << diff << endl; err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyDeviceToHost); #if PRINT == true printMatrix(h_matrixC, matrixSize, matrixSize); #endif #endif #if CUDA_MULT_SHARED //CUDA MULT Shared gettimeofday(&tv1, NULL); hipLaunchKernelGGL(( multiMatrixCUDAShared), dim3(grid), dim3(thread), 0, 0, d_matrixA, d_matrixB, d_matrixC,matrixSize); hipDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult Shared time: " << diff << endl; err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyDeviceToHost); #if PRINT == true printMatrix(matrixSize, h_matrixC); #endif #endif #if CUDA_MULT_SHARED_TRANSPOSE //CUDA MULT Shared hipMalloc((void **) 
&d_matrixBTransposed, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); hipLaunchKernelGGL(( matrixTranspose), dim3(grid), dim3(thread), 0, 0, d_matrixB, d_matrixBTransposed,matrixSize); hipDeviceSynchronize(); gettimeofday(&tv1, NULL); hipLaunchKernelGGL(( multiMatrixCUDASharedTransposed), dim3(grid), dim3(thread), 0, 0, d_matrixA, d_matrixBTransposed, d_matrixC, matrixSize); hipDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult Shared transposed time: " << diff << endl; err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyDeviceToHost); if (h_matrixC_CPU != NULL) { cout << "Comparing result:" << endl; bool * h_result = (bool *) malloc( sizeof(bool) * matrixSize * matrixSize); bool * d_result; hipMalloc((void **) &d_result, sizeof(bool) * matrixSize * matrixSize); hipLaunchKernelGGL(( matrixCompare), dim3(grid), dim3(thread), 0, 0, d_result,d_matrixC,d_matrixC_CPU, matrixSize); hipDeviceSynchronize(); hipMemcpy(h_result, d_result, sizeof(bool) * matrixSize * matrixSize, hipMemcpyDeviceToHost); for (int row = 0; row < matrixSize; ++row) { for (int column = 0; column < matrixSize; ++column) { if (!h_result[ELEM(row, column, matrixSize)]) { cout << "Error on position: " << row << "," << column << endl; } } } } #if PRINT == true printMatrix(matrixSize, h_matrixC); hipMemcpy(h_matrixB, d_matrixBTransposed, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, hipMemcpyDeviceToHost); cout << "Matrix B transposed:" << endl; printMatrix(matrixSize, h_matrixB); #endif #endif free(h_matrixA); free(h_matrixB); free(h_matrixC); hipFree(d_matrixA); hipFree(d_matrixB); hipFree(d_matrixC); hipFree(d_matrixBTransposed); hipFree(d_matrixC_CPU); return 0; }
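One thing worth noting in the file above: matrixCompare reads matrixA for both operands of its comparison, so the GPU-versus-CPU check at the end of main always reports equality and the matrixB argument (the CPU result passed in) is never inspected. A corrected sketch of the presumable intent, reusing the file's ELEM and MATRIX_TYPE macros; the kernel name is changed to mark it as not part of the original benchmark.

// Element-wise comparison of two matrices; assumes ELEM and MATRIX_TYPE as defined above.
__global__ void matrixCompareFixed(bool *result, MATRIX_TYPE *matrixA,
                                   MATRIX_TYPE *matrixB, int matrixSize)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < matrixSize && row < matrixSize) {
        result[ELEM(row, col, matrixSize)] =
            (matrixA[ELEM(row, col, matrixSize)] == matrixB[ELEM(row, col, matrixSize)]);
    }
}

Even with the fixed operand, exact float equality between CPU and GPU products may still fail spuriously for large matrixSize because the two sides accumulate in different orders, so a tolerance-based comparison may be preferable in practice.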
21ae3c56b7f3eadf3f09e9f346f5748fd1bf20b3.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <common_functions.h> #include <cuda_runtime_api.h> #include <driver_types.h> #include <host_defines.h> #include <stdio.h> #include <sys/time.h> #include <iostream> #include <ostream> //Param #define PRINT false //Running params #define CPU_MULT true #define CUDA_MULT true #define CUDA_MULT_TRANSPOSE true #define CUDA_MULT_SHARED true #define CUDA_MULT_SHARED_TRANSPOSE true //Matrix params #define MATRIX_SET_1 false #define MATRIX_SIZE 1000 #define MATRIX_TYPE float const int TILE_WIDTH = 16; //Macros #define ELEM(row,column,rowSize) (column+row*rowSize) /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. * * See cuda.h for error code descriptions. */ #define CHECK_CUDA_RESULT(N) { \ CUresult result = N; \ if (result != 0) { \ printf("CUDA call on line %d returned error %d\n", __LINE__, \ result); \ exit(1); \ } \ } using namespace std; struct timeval tv1, tv2; double diff; __host__ void printMatrix(MATRIX_TYPE* matrix, int rowCount, int columnCount) { for (int row = 0; row < rowCount; ++row) { for (int column = 0; column < columnCount; ++column) { cout << matrix[ELEM(row, column, rowCount)] << " "; } cout << endl; } } __host__ MATRIX_TYPE * createMatrixTransposta(int rowCount, int columnCount, int initValue, bool colValue) { MATRIX_TYPE * matrix = (MATRIX_TYPE*) malloc( sizeof(MATRIX_TYPE) * rowCount * columnCount); for (int row = 0; row < rowCount; ++row) { for (int column = 0; column < columnCount; ++column) { matrix[ELEM(row, column, rowCount)] = colValue ? column + 1 : initValue; } } return matrix; } __host__ MATRIX_TYPE * createMatrix(int rowCount, int columnCount, int initValue, bool rowValue) { MATRIX_TYPE * matrix = (MATRIX_TYPE*) malloc( sizeof(MATRIX_TYPE) * rowCount * columnCount); for (int row = 0; row < rowCount; ++row) { for (int column = 0; column < columnCount; ++column) { matrix[ELEM(row, column, rowCount)] = rowValue ? 
row + 1 : initValue; } } return matrix; } __host__ MATRIX_TYPE * createMatrix(int rowCount, int columnCount) { return createMatrix(rowCount, columnCount, 1, true); } __host__ MATRIX_TYPE * createMatrix(int rowCount, int columnCount, int initValue) { return createMatrix(rowCount, columnCount, initValue, false); } __global__ void matrixCompare(bool* result, MATRIX_TYPE* matrixA, MATRIX_TYPE * matrixB, int matrixSize) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (col < matrixSize && row < matrixSize) { result[ELEM(row, col, matrixSize)] = (matrixA[ELEM(row, col, matrixSize)] == matrixA[ELEM(row, col, matrixSize)]); } } __host__ void multMatrix(MATRIX_TYPE * matrixA, MATRIX_TYPE * matrixB, MATRIX_TYPE * matrixC, int matrixSize) { for (int row = 0; row < matrixSize; ++row) { for (int column = 0; column < matrixSize; ++column) { MATRIX_TYPE sum = 0; for (int k = 0; k < matrixSize; ++k) { sum += matrixA[ELEM(row, k, matrixSize)] * matrixB[ELEM(k, column, matrixSize)]; } matrixC[ELEM(row, column, matrixSize)] = sum; } } } __global__ void multiMatrixCUDA(MATRIX_TYPE * matrixA, MATRIX_TYPE * matrixB, MATRIX_TYPE * matrixC, int matrixSize) { unsigned int column = (blockDim.x * blockIdx.x) + threadIdx.x; unsigned int row = (blockDim.y * blockIdx.y) + threadIdx.y; if (column < matrixSize && row < matrixSize) { MATRIX_TYPE sum = 0; for (int k = 0; k < matrixSize; ++k) { sum += matrixA[ELEM(row, k, matrixSize)] * matrixB[ELEM(k, column, matrixSize)]; } matrixC[ELEM(row, column, matrixSize)] = sum; } } __global__ void matrixTranspose(MATRIX_TYPE * matrix_in, MATRIX_TYPE * matrix_out, int matrixSize) { int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < matrixSize && row < matrixSize) { matrix_out[ELEM(row, column, matrixSize)] = matrix_in[ELEM(column, row, matrixSize)]; } } __global__ void multiMatrixCUDABTranspose(MATRIX_TYPE * matrixA, MATRIX_TYPE * matrixBTransposed, MATRIX_TYPE * matrixC, int matrixSize) { int column = (blockDim.x * blockIdx.x) + threadIdx.x; int row = (blockDim.y * blockIdx.y) + threadIdx.y; if (column < matrixSize && row < matrixSize) { MATRIX_TYPE sum = 0; for (int k = 0; k < matrixSize; ++k) { sum += matrixA[ELEM(row, k, matrixSize)] * matrixBTransposed[ELEM(column, k, matrixSize)]; } matrixC[ELEM(row, column, matrixSize)] = sum; } } __global__ void matMultTileCuda(const float *A, const float *B, float *C, int N){ __shared__ float a_tile[TILE_WIDTH][TILE_WIDTH], b_tile[TILE_WIDTH][TILE_WIDTH]; int qtd_tiles = N/TILE_WIDTH + (N%TILE_WIDTH==0?0:1); int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; int offset; float sum = 0.0; for (int tile_ind = 0; tile_ind < qtd_tiles; ++tile_ind) { offset = tile_ind*TILE_WIDTH; if(i<N && offset+threadIdx.x< N){ a_tile[threadIdx.y][threadIdx.x] = A[ELEM(i, offset+threadIdx.x, N)]; } else{ a_tile[threadIdx.y][threadIdx.x] = 0.0; } if(threadIdx.y+offset<N && j< N){ b_tile[threadIdx.y][threadIdx.x] = B[ELEM(threadIdx.y+offset, j, N)]; } else{ b_tile[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { sum += a_tile[threadIdx.y][k]*b_tile[k][threadIdx.x]; } __syncthreads(); } if(i<N && j<N) C[ELEM(i,j,N)] = sum; } __global__ void multiMatrixCUDAShared(MATRIX_TYPE* matrixA, MATRIX_TYPE* matrixB, MATRIX_TYPE* matrixC, int matrixSize) { __shared__ int matrixSharedA[TILE_WIDTH * TILE_WIDTH]; __shared__ int matrixSharedB[TILE_WIDTH * TILE_WIDTH]; //Row and 
column of element to calculate int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col = blockIdx.x * TILE_WIDTH + threadIdx.x; MATRIX_TYPE sum = 0; int tileCount = ((matrixSize - 1) / TILE_WIDTH) + 1; //Iterate tiles to compute the sum for (int tileIndex = 0; tileIndex < tileCount; ++tileIndex) { int colA = tileIndex * TILE_WIDTH + threadIdx.x; //Collaborative loading of A and B tiles into shared memory if (row < matrixSize && colA < matrixSize) matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixA[row * matrixSize + colA]; else matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; int rowB = tileIndex * TILE_WIDTH + threadIdx.y; if (col < matrixSize && rowB < matrixSize) matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixB[(rowB) * matrixSize + col]; else matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) sum += matrixSharedA[ELEM(threadIdx.y, k, TILE_WIDTH)] * matrixSharedB[ELEM(k, threadIdx.x, TILE_WIDTH)]; __syncthreads(); } if (row < matrixSize && col < matrixSize) { matrixC[ELEM(row, col, matrixSize)] = sum; } } __global__ void multiMatrixCUDASharedTransposed(MATRIX_TYPE* matrixA, MATRIX_TYPE* matrixBTransposed, MATRIX_TYPE* matrixC, int matrixSize) { __shared__ int matrixSharedA[TILE_WIDTH * TILE_WIDTH]; __shared__ int matrixSharedB[TILE_WIDTH * TILE_WIDTH]; //Row and column of element to calculate int row = blockIdx.y * TILE_WIDTH + threadIdx.y; int col = blockIdx.x * TILE_WIDTH + threadIdx.x; MATRIX_TYPE sum = 0; int tileCount = ((matrixSize - 1) / TILE_WIDTH) + 1; //Iterate tiles to compute the sum for (int tileIndex = 0; tileIndex < tileCount; ++tileIndex) { int colA = tileIndex * TILE_WIDTH + threadIdx.x; //Collaborative loading of A and B tiles into shared memory if (row < matrixSize && colA < matrixSize) matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixA[row * matrixSize + colA]; else matrixSharedA[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; int rowB = tileIndex * TILE_WIDTH + threadIdx.y; if (col < matrixSize && rowB < matrixSize) matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = matrixBTransposed[(col) * matrixSize + rowB]; else matrixSharedB[ELEM(threadIdx.y, threadIdx.x, TILE_WIDTH)] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) sum += matrixSharedA[ELEM(threadIdx.y, k, TILE_WIDTH)] * matrixSharedB[ELEM(k, threadIdx.x, TILE_WIDTH)]; __syncthreads(); } if (row < matrixSize && col < matrixSize) { matrixC[ELEM(row, col, matrixSize)] = sum; } } int getSPcores(cudaDeviceProp devProp) { int cores = 0; int mp = devProp.multiProcessorCount; switch (devProp.major) { case 2: // Fermi if (devProp.minor == 1) cores = mp * 48; else cores = mp * 32; break; case 3: // Kepler cores = mp * 192; break; case 5: // Maxwell cores = mp * 128; break; case 6: // Pascal if (devProp.minor == 1) cores = mp * 128; else if (devProp.minor == 0) cores = mp * 64; else printf("Unknown device type\n"); break; default: printf("Unknown device type\n"); break; } return cores; } int getBlockSize() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); return prop.major < 2 ? 
16 : 32; } return 16; } void getDeviceInfo() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); cout << "ECC enabled: " << prop.ECCEnabled << endl; cout << "Warp size: " << prop.warpSize << endl; cout << "Shared Memory Per Block: " << prop.sharedMemPerBlock << endl; cout << "Shared Memory Per Multiprocessor: " << prop.sharedMemPerMultiprocessor << endl; cout << "Global Memory : " << prop.totalGlobalMem << endl; cout << "Concurrent Kernels : " << prop.concurrentKernels << endl; cout << "Integrated : " << prop.integrated << endl; cout << "Multiprocessor Count : " << prop.multiProcessorCount << endl; cout << "Cuda cores: " << getSPcores(prop) << endl; cout << "Concurrent Managed Access: " << prop.concurrentManagedAccess << endl; cout << "Max grid Size: " << prop.maxGridSize << endl; cout << "Max thread Dim: " << prop.maxThreadsDim << endl; cout << "Max thread per block: " << prop.maxThreadsPerBlock << endl; cout << "Max thread per multiprocessor: " << prop.maxThreadsPerMultiProcessor << endl; cout << "Active warps: " << prop.maxThreadsPerMultiProcessor / prop.warpSize << endl; } } double diffTime(timeval& tv1, timeval& tv2) { return (double) ((tv2.tv_usec - tv1.tv_usec) / 1000 + (double) (tv2.tv_sec - tv1.tv_sec) * 1000); } void printMatrix(int matrixSize, MATRIX_TYPE* h_matrixC) { gettimeofday(&tv1, NULL); printMatrix(h_matrixC, matrixSize, matrixSize); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "Print time:" << diff << endl; } int main(int argc, char **argv) { //getDeviceInfo(); cudaSetDevice(0); int matrixSize = MATRIX_SIZE; cudaError_t err; MATRIX_TYPE * h_matrixC_CPU; #if MATRIX_SET_1 == true MATRIX_TYPE * h_matrixA = createMatrix(matrixSize, matrixSize, 1); MATRIX_TYPE * h_matrixB = createMatrix(matrixSize, matrixSize, 1); MATRIX_TYPE * h_matrixC = createMatrix(matrixSize, matrixSize, 0); #else MATRIX_TYPE * h_matrixA = createMatrix(matrixSize, matrixSize); MATRIX_TYPE * h_matrixB = createMatrixTransposta(matrixSize, matrixSize, 1, true); MATRIX_TYPE * h_matrixC = createMatrix(matrixSize, matrixSize, 0); #endif //Set size of blocks and threads //int blockSize = getBlockSize(); int blockSize = TILE_WIDTH; dim3 thread(blockSize, blockSize, 1); int gridSizeX = matrixSize / thread.x; gridSizeX += (matrixSize % thread.x) == 0 ? 0 : 1; int gridSizeY = matrixSize / thread.y; gridSizeY += (matrixSize % thread.y) == 0 ? 
0 : 1; dim3 grid(gridSizeX, gridSizeY, 1); cout << "Grid: " << grid.x << " - " << grid.y << " Thread: " << thread.x << " - " << thread.y << endl; //Alocação das matrizes no dispositivo MATRIX_TYPE * d_matrixA, *d_matrixB, *d_matrixC, *d_matrixBTransposed, *d_matrixC_CPU; cudaMalloc((void **) &d_matrixA, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); cudaMalloc((void **) &d_matrixB, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); cudaMalloc((void **) &d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); //Copia dos dados para o dispositivo cudaMemcpy(d_matrixA, h_matrixA, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyHostToDevice); cudaMemcpy(d_matrixB, h_matrixB, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyHostToDevice); cudaMemcpy(d_matrixC, h_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyHostToDevice); #if PRINT == true cout << "A: " << endl; printMatrix(h_matrixA, matrixSize, matrixSize); cout << "B: " << endl; printMatrix(h_matrixB, matrixSize, matrixSize); #endif #if CPU_MULT == true //CPU MULT h_matrixC_CPU = createMatrix(matrixSize, matrixSize, 0); gettimeofday(&tv1, NULL); multMatrix(h_matrixA, h_matrixB, h_matrixC_CPU, matrixSize); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CPU Mult time: " << diff << endl; cudaMalloc((void **) &d_matrixC_CPU, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); cudaMemcpy(d_matrixC_CPU, h_matrixC_CPU, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyHostToDevice); #if PRINT == true printMatrix(matrixSize, h_matrixC_CPU); #endif #endif #if CUDA_MULT == true //CUDA MULT gettimeofday(&tv1, NULL); multiMatrixCUDA<<<grid, thread>>>(d_matrixA, d_matrixB, d_matrixC, matrixSize); cudaDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult time: " << diff << endl; cudaMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyDeviceToHost); #if PRINT == true printMatrix(h_matrixC, matrixSize, matrixSize); #endif #endif #if CUDA_MULT_TRANSPOSE == true //CUDA MULT B Transpose cudaMalloc((void **) &d_matrixBTransposed, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); matrixTranspose<<<grid, thread>>>(d_matrixB, d_matrixBTransposed,matrixSize); cudaDeviceSynchronize(); gettimeofday(&tv1, NULL); multiMatrixCUDABTranspose<<<grid, thread>>>(d_matrixA, d_matrixBTransposed, d_matrixC, matrixSize); cudaDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult B Transpose time: " << diff << endl; err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyDeviceToHost); #if PRINT == true printMatrix(h_matrixC, matrixSize, matrixSize); #endif #endif #if CUDA_MULT_SHARED //CUDA MULT Shared gettimeofday(&tv1, NULL); multiMatrixCUDAShared<<<grid, thread>>>(d_matrixA, d_matrixB, d_matrixC,matrixSize); cudaDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult Shared time: " << diff << endl; err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyDeviceToHost); #if PRINT == true printMatrix(matrixSize, h_matrixC); #endif #endif #if CUDA_MULT_SHARED_TRANSPOSE //CUDA MULT Shared cudaMalloc((void **) &d_matrixBTransposed, sizeof(MATRIX_TYPE) * matrixSize * matrixSize); matrixTranspose<<<grid, thread>>>(d_matrixB, 
d_matrixBTransposed,matrixSize); cudaDeviceSynchronize(); gettimeofday(&tv1, NULL); multiMatrixCUDASharedTransposed<<<grid, thread>>>(d_matrixA, d_matrixBTransposed, d_matrixC, matrixSize); cudaDeviceSynchronize(); gettimeofday(&tv2, NULL); diff = diffTime(tv1, tv2); cout << "CUDA Mult Shared transposed time: " << diff << endl; err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy(h_matrixC, d_matrixC, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyDeviceToHost); if (h_matrixC_CPU != NULL) { cout << "Comparing result:" << endl; bool * h_result = (bool *) malloc( sizeof(bool) * matrixSize * matrixSize); bool * d_result; cudaMalloc((void **) &d_result, sizeof(bool) * matrixSize * matrixSize); matrixCompare<<<grid, thread>>>(d_result,d_matrixC,d_matrixC_CPU, matrixSize); cudaDeviceSynchronize(); cudaMemcpy(h_result, d_result, sizeof(bool) * matrixSize * matrixSize, cudaMemcpyDeviceToHost); for (int row = 0; row < matrixSize; ++row) { for (int column = 0; column < matrixSize; ++column) { if (!h_result[ELEM(row, column, matrixSize)]) { cout << "Error on position: " << row << "," << column << endl; } } } } #if PRINT == true printMatrix(matrixSize, h_matrixC); cudaMemcpy(h_matrixB, d_matrixBTransposed, sizeof(MATRIX_TYPE) * matrixSize * matrixSize, cudaMemcpyDeviceToHost); cout << "Matrix B transposed:" << endl; printMatrix(matrixSize, h_matrixB); #endif #endif free(h_matrixA); free(h_matrixB); free(h_matrixC); cudaFree(d_matrixA); cudaFree(d_matrixB); cudaFree(d_matrixC); cudaFree(d_matrixBTransposed); cudaFree(d_matrixC_CPU); return 0; }
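One detail in the verification path above: matrixCompare reads matrixA on both sides of the ==, so it always reports a match and its matrixB parameter goes unused. A minimal corrected sketch (hypothetical name, not part of the original file) that compares the two buffers it is actually given:

// Sketch only: element-wise comparison of two matrices, reusing the ELEM macro
// and the MATRIX_TYPE definition from the file above.
__global__ void matrixCompareBoth(bool *result, const MATRIX_TYPE *matrixA,
                                  const MATRIX_TYPE *matrixB, int matrixSize) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < matrixSize && row < matrixSize) {
        // Compare A against B instead of A against itself.
        result[ELEM(row, col, matrixSize)] =
                (matrixA[ELEM(row, col, matrixSize)] == matrixB[ELEM(row, col, matrixSize)]);
    }
}

Since MATRIX_TYPE is float here, exact equality is fragile once the inputs stop being small integers; a tolerance test such as fabsf(a - b) <= 1e-5f would be the safer check.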
0334ef8eb7ce0e09cc9780c9ad544b42a23be5ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> // b: batch size; n: points num; xyz1,xyz2: two tensor,(b,n,3); matchl,matchr: two tensors,(b,n); cost,tensor,(b,n,n) // __restrict__ __global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){ //this kernel handles up to 4096 points const int NMax=4096; // // __shared__ https://www.cxyzjd.com/article/dcrmg/54880471 __shared__ short Queue[NMax]; __shared__ short matchrbuf[NMax]; __shared__ float pricer[NMax]; __shared__ float bests[32][3]; __shared__ int qhead,qlen; const int BufLen=2048; __shared__ float buf[BufLen]; // buf /** n*n 51212nn/512 n 1k2k-1 **/ // for for (int bno=blockIdx.x;bno<b;bno+=gridDim.x){ // gridDim.x32blockIdx.x // batchpatchblock /** bnoblockIdxgridbno22+32=34batch_size32 batch_size32batch_sizeblock batch_size64gridDim32batch batch_sizebatch gridDim32batch_size32 **/ int cnt=0; float tolerance=1e-4; // n*pblock512 // bno*n ~ bno*(n+1) // n // threadIdx.xbno*n+jbno*n+j+blockDim.xbno*n+j+blockDim.x*2... // for for (int j=threadIdx.x;j<n;j+=blockDim.x) matchl[bno*n+j]=-1; // matchl, (b,n), -1 for (int j=threadIdx.x;j<n;j+=blockDim.x) matchrbuf[j]=-1; // matchrbuf, (n=4096,1), -1 for (int j=threadIdx.x;j<n;j+=blockDim.x) Queue[j]=j; // Queue(n=4096,1)0~4095 for (int j=threadIdx.x;j<n;j+=blockDim.x) pricer[j]=0; // pricer(n=4096,1)0 const int Block=512; // 512xyz1xyz2 // b*n*3bnoblockbnopatch (batch_sizeblockpatchk0) for (int k0=0;k0<n;k0+=Block){ // k0512indexk1 int k1=min(n,k0+Block); // nBlockk1-k0512512 for (int k=threadIdx.x;k<(k1-k0)*3;k+=blockDim.x) // threadIdx.x from 0 to 511, k from 0 to 512*3 buf[k]=xyz1[bno*n*3+k0*3+k]; // buf(2048,1); xyz1,(b,n,3) // bno*n*3bnoblockbnopatch // k0512k0*3for512k1=min(n,k0+Block)n /** buf (b,n,3)buf512512*3=1536 2048 batchbuf512b*n*3512 `for (int k0=0;k0<n;k0+=Block)`Block=512512 k0 = 0+512+512=10241024k0*3 k0*3512512k1=min(n,k0+Block) 512gridblockk0 patchblockpatch bno%32blockthreadIdx.xbno%32patch512*3 threadIdx.xthreadIdx.x+blockDim.xthreadIdx.x+blockDim.x+blockDim.x bnobatch_sizepatchforbatch batch_sizeblock_numblock_numpatch, **/ // gridpatchpatch512buf // bufpatch512 // buf__shared__ __syncthreads(); // buf /** idxidxidx+blockDim.x 0blockDim.x-1blockDim.x blockDim.x2*blockDim.x-1 **/ /** cost(b,n,n)bpatchngtpatchn i,j,kpredipatchjgtipatchk bnojbnopatch512buf[] jgtbnopatchbuf512 costbno,k,jbno*n*n+k*n+j cost[blockIdx.x*n*n+k*n+j]blockIdx.x0~32batch gridblockIdx.xb cost costblockIdx.x*n*ncostb*n*n blockIdx.xbblockb **/ for (int j=threadIdx.x;j<n;j+=blockDim.x){ // j from 0 to 4096patch // bno*n*3blockpatchj*3j float x2=xyz2[bno*n*3+j*3+0]; float y2=xyz2[bno*n*3+j*3+1]; float z2=xyz2[bno*n*3+j*3+2]; for (int k=k0;k<k1;k++){ // k0512patchindexk1 float x1=buf[(k-k0)*3+0]; float y1=buf[(k-k0)*3+1]; float z1=buf[(k-k0)*3+2]; float d=sqrtf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2)); cost[blockIdx.x*n*n+k*n+j]=d; } } __syncthreads(); // 512gt } // forcost //calculate the distacne if (threadIdx.x==0){ // // __shared__ // // __shared__ // qhead=0; qlen=n; } __syncthreads(); // int loaded=0; // float value9,value10,value11,value12,value13,value14,value15,value16; // while (qlen){ // nnwhile int i=Queue[qhead]; // xyz1i // int i2; // if (qhead+1<n) // ii2iii2 i2=Queue[qhead+1]; // xyz1i2,i else i2=Queue[0]; float best=1e38f,best2=1e38f; // int bestj=0; // if (n==blockDim.x*8){ // n84096/512=885124096 int j=threadIdx.x; float 
value1,value2,value3,value4,value5,value6,value7,value8; if (loaded){ // pricer409610 value1=value9+pricer[j]; value2=value10+pricer[j+blockDim.x]; value3=value11+pricer[j+blockDim.x*2]; value4=value12+pricer[j+blockDim.x*3]; value5=value13+pricer[j+blockDim.x*4]; value6=value14+pricer[j+blockDim.x*5]; value7=value15+pricer[j+blockDim.x*6]; value8=value16+pricer[j+blockDim.x*7]; loaded=0; }else{ /** cost[blockIdx.x*n*n+i*n+j+blockDim.x * t] xyz1blockIdx.xpatchixyz2blockIdx.xpatchj+blockDim.x * t pricer(4096,1),0 pricer[j+blockDim.x*t]xyz1 32512(blockIdx.x*n*n) xyz1(i*n) value1~8cost xyz1xyz2patch4096cost 409684096blockDim.x 8value1~8 **/ value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x]; value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2]; value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3]; value5=cost[blockIdx.x*n*n+i*n+j+blockDim.x*4]+pricer[j+blockDim.x*4]; value6=cost[blockIdx.x*n*n+i*n+j+blockDim.x*5]+pricer[j+blockDim.x*5]; value7=cost[blockIdx.x*n*n+i*n+j+blockDim.x*6]+pricer[j+blockDim.x*6]; value8=cost[blockIdx.x*n*n+i*n+j+blockDim.x*7]+pricer[j+blockDim.x*7]; /** value9~16xyz1blockIdx.xpatchi2xyz2blockIdx.xpatch8cost i2cost xyz1i2xyz2blockIdx.xpatchcost value9~16 **/ value9=cost[blockIdx.x*n*n+i2*n+j]; value10=cost[blockIdx.x*n*n+i2*n+j+blockDim.x]; value11=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*2]; value12=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*3]; value13=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*4]; value14=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*5]; value15=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*6]; value16=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*7]; // value1~8xyz1ixyz2cost // value9~16xyz1i2xyz2cost loaded=qlen>1; // qlenxyz1nloadedTrueFalse } int vj,vj2,vj3,vj4; if (value1<value2){ vj=j; // value1index }else{ vj=j+blockDim.x; // value2index float t=value1; // value1value2value1 value1=value2; value2=t; } if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; } if (value5<value6){ vj3=j+blockDim.x*4; }else{ vj3=j+blockDim.x*5; float t=value5; value5=value6; value6=t; } if (value7<value8){ vj4=j+blockDim.x*6; }else{ vj4=j+blockDim.x*7; float t=value7; value7=value8; value8=t; } // value1~8 if (value1<value3){ value2=fminf(value2,value3); // 1<2343<4, 23value2 }else{ value2=fminf(value1,value4); // 3<1241<2, 14value2 value1=value3; vj=vj2; } if (value5<value7){ value6=fminf(value6,value7); // 5<6787<8, 67value6 }else{ value6=fminf(value5,value8); // 7<5685<6, 58value6 value5=value7; vj3=vj4; } // value1357value1value5value2value6 if (value1<value5){ best=value1; bestj=vj; best2=fminf(value2,value5); // 1<256,5<6 25best2 }else{ best2=fminf(value1,value6); // 5<126,1<2 16best2 best=value5; bestj=vj3; } // bestbest2 }else if (n>=blockDim.x*4){ for (int j=threadIdx.x;j<n;j+=blockDim.x*4){ float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x]; float value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2]; float value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3]; int vj,vj2; if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; } if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; } if (value1<value3){ value2=fminf(value2,value3); }else{ value2=fminf(value1,value4); value1=value3; vj=vj2; } if (best<value1){ best2=fminf(best2,value1); }else{ 
best2=fminf(best,value2); best=value1; bestj=vj; } } }else if (n>=blockDim.x*2){ for (int j=threadIdx.x;j<n;j+=blockDim.x*2){ float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x]; int vj; if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; } if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; } } }else{ // xyz1blockIdx.xpatchixyz2blockIdx.xpatch // bestbest2 // for1i2 // n/blockDim.x+1n/blockDim.x+1 // todo for (int j=threadIdx.x;j<n;j+=blockDim.x){ float value=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; if (best<value){ best2=fminf(best2,value); }else{ best2=best; bestj=j; best=value; } } } // /** 409632 **/ for (int i=16;i>0;i>>=1){ // i = 168421 // warp0~32i bestbest2bestj b1b2bj // bestswarp03264480 // warpwarp32 float b1=__shfl_down_sync(0xFFFFFFFF,best,i,32); float b2=__shfl_down_sync(0xFFFFFFFF,best2,i,32); int bj=__shfl_down_sync(0xFFFFFFFF,bestj,i,32); // if (best<b1){ // best<b1<b2, best<best2, b1best2 best2=fminf(b1,best2); }else{ // b1<best<best2, b1<b2, bestb2 best=b1; best2=fminf(best,b2); bestj=bj; } } if ((threadIdx.x&31)==0){ // 03264480% /** bests(32, 3)51251232bests threadIdx.xbests[threadIdx.x % 32][:] id03264480bests01215 **/ bests[threadIdx.x>>5][0]=best; bests[threadIdx.x>>5][1]=best2; *(int*)&bests[threadIdx.x>>5][2]=bestj; // } __syncthreads(); // bests int nn=blockDim.x>>5; // 512>>5=16 /** xyzxyzpatch409616bests 4096 0 **/ if (threadIdx.x<nn){ best=bests[threadIdx.x][0]; best2=bests[threadIdx.x][1]; bestj=*(int*)&bests[threadIdx.x][2]; for (int i=nn>>1;i>0;i>>=1){ // i = 8,4,2,1 float b1=__shfl_down_sync(0xFFFFFFFF,best,i,32); float b2=__shfl_down_sync(0xFFFFFFFF,best2,i,32); int bj=__shfl_down_sync(0xFFFFFFFF,bestj,i,32); if (best<b1){ best2=fminf(b1,best2); }else{ best=b1; best2=fminf(best,b2); bestj=bj; } } } if (threadIdx.x==0){ float delta=best2-best+tolerance; // blockxyz1xyz24096 qhead++; qlen--; // qlen = 409640951 if (qhead>=n) // qheadxyz1patch qhead-=n; int old=matchrbuf[bestj]; // 40961bestj pricer[bestj]+=delta; // /** price **/ cnt++; // while if (old!=-1){ // matchrbuf-1old==1 int ql=qlen; int tail=qhead+ql; // tail = qlen=ql+1; if (tail>=n) tail-=n; Queue[tail]=old; } if (cnt==(40*n)){ if (tolerance==1.0) qlen=0; tolerance=fminf(1.0,tolerance*100); cnt=0; } } __syncthreads(); if (threadIdx.x==0){ // matchrbuf[bestj]=i; // bestji } } __syncthreads(); // matchrbuf__shared__ for (int j=threadIdx.x;j<n;j+=blockDim.x) // matchrbufmatchr // matchr matchr[bno*n+j]=matchrbuf[j]; for (int j=threadIdx.x;j<n;j+=blockDim.x) // matchlmatchr matchl[bno*n+matchrbuf[j]]=j; __syncthreads(); // matchrmatchlmatchl } } void AuctionMatchLauncher(int b,int n,const float * xyz1,const float * xyz2,int * matchl,int * matchr,float * cost){ hipLaunchKernelGGL(( AuctionMatchKernel), dim3(32),dim3(512), 0, 0, b,n,xyz1,xyz2,matchl,matchr,cost); // grid32blockblock512 }
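The kernel above addresses the cost buffer as cost[blockIdx.x*n*n + i*n + j], i.e. a (b, n, n) tensor laid out row-major with one n x n slice per block. A small sketch that only makes this layout explicit (costIndex is a hypothetical helper, not part of the original source):

// Sketch only: flattened indexing for the (b, n, n) cost tensor. Slice 'batch'
// holds the pairwise distances for one point-cloud pair; (row, col) picks one
// point from xyz1 and one from xyz2.
__host__ __device__ inline size_t costIndex(int batch, int row, int col, int n)
{
    return (size_t)batch * n * n + (size_t)row * n + (size_t)col;
}

Because the launcher fixes the grid at 32 blocks, batch is always blockIdx.x inside the kernel, and batches beyond the grid width are handled by the outer for (int bno = blockIdx.x; bno < b; bno += gridDim.x) loop, which simply overwrites the slice on the next pass.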
0334ef8eb7ce0e09cc9780c9ad544b42a23be5ef.cu
#include <cstdio> // b: batch size; n: points num; xyz1,xyz2: two tensor,(b,n,3); matchl,matchr: two tensors,(b,n); cost,tensor,(b,n,n) // __restrict__保证不存在别名,那么在修改其中一个指针指向的内存中的值并不会影响另一个指正指向的内存 __global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){ //this kernel handles up to 4096 points const int NMax=4096; // 最多可以处理的点数 // __shared__ 块内共享: https://www.cxyzjd.com/article/dcrmg/54880471 __shared__ short Queue[NMax]; __shared__ short matchrbuf[NMax]; __shared__ float pricer[NMax]; __shared__ float bests[32][3]; __shared__ int qhead,qlen; const int BufLen=2048; __shared__ float buf[BufLen]; // buf的用意是什么? /** 我们需要计算一个点云中所有点到另一个点云所有点的距离,总共n*n个数, 最简单的方式是我用一个模块中的512个线程每次处理点云1中的一个点到点云2中的n个点的距离,每个线程处理n/512个数, 这是一次循环,总共循环n次,每次循环,都要等所有线程的运算都结束了以后,才能进行下一次循环,只要还有线程忙碌,提前结束的线程都需要等待。 为了缓解这里的等待时间,可以一次处理点云1中的k个点到点云2中的所有点的距离,这样就减少了k-1次等待时间 **/ // 所有运算在这个for循环中完成 for (int bno=blockIdx.x;bno<b;bno+=gridDim.x){ // gridDim.x为32,blockIdx.x为当前线程所在的块 // 这里其实就是将batch分组,保证每次处理的patch数量不多于block的数量,方便后面算法实现 /** 这里bno从当前blockIdx开始,每次循环加上grid的宽度,加入bno一开始是2,那么下一次就是2+32=34,如果batch_size也是32,那么循环结束 如果batch_size小于32,那么batch_size之后的block相当于是闲置的。 如果batch_size是64,gridDim等于32,那么就要所有线程分两次处理完一个batch。 也就是说,将batch_size按照一个网格中所有的块进行分组,依次处理batch中的数据 所以如果gridDim等于32,那么batch_size最好是32的整数倍数,这样就可以使得块中的线程因为无法整除而闲置 **/ int cnt=0; float tolerance=1e-4; // 初始化矩阵,对于一个n*p的矩阵,每个block的512个线程处理矩阵的一行,现将矩阵行优先地排开 // 对于一个线程,其处理的数据在所有数据的bno*n ~ bno*(n+1)之间的这些数据 // 将一行n个数据按照一个块中的线程分组,每个线程需要处理组数个数据(假设整除) // threadIdx.x表示线程在块内的序号,其应该处理的那些数据应该是bno*n+j,bno*n+j+blockDim.x,bno*n+j+blockDim.x*2... // 所以下面的一个for循环可以对任意形状的矩阵进行赋值 for (int j=threadIdx.x;j<n;j+=blockDim.x) matchl[bno*n+j]=-1; // matchl矩阵, (b,n), 用-1填充 for (int j=threadIdx.x;j<n;j+=blockDim.x) matchrbuf[j]=-1; // matchrbuf矩阵, (n=4096,1), 用-1填充 for (int j=threadIdx.x;j<n;j+=blockDim.x) Queue[j]=j; // Queue矩阵,(n=4096,1),使用0~4095填充 for (int j=threadIdx.x;j<n;j+=blockDim.x) pricer[j]=0; // pricer矩阵,(n=4096,1),使用0填充 const int Block=512; // 为什么要512个点一批进行处理,为什么不直接计算xyz1中所有点到xyz2所有点的距离呢? 
// 对于b*n*3的点云,第bno个block内的线程处理第bno个patch (如果batch_size大于block的数量,那么后面的patch无法被处理到?由k0控制) for (int k0=0;k0<n;k0+=Block){ // k0是当前要处理的512个点(或小于)的起始index,k1终止位置 int k1=min(n,k0+Block); // 如果n无法被Block整除,那么每一轮k1-k0都是512,否则最后一轮会是小于512的余数 for (int k=threadIdx.x;k<(k1-k0)*3;k+=blockDim.x) // threadIdx.x from 0 to 511, k from 0 to 512*3 buf[k]=xyz1[bno*n*3+k0*3+k]; // buf,(2048,1); xyz1,(b,n,3) // bno*n*3保证第bno个block内的线程只处理点云的第bno行,每块处理一个patch // k0每轮增加512,k0*3保证每一轮for循环(外层),可以处理512个点,k1=min(n,k0+Block)可以保证不会访问超多n个点 /** 所以这里对于buf赋值的整体思路是: 对于一个(b,n,3)的张量,我们每次用buf存储512个点的数据,实际上只需要512*3=1536个数字, 这里申请2048个数字可能是为了保证一定的余量,其实不用这么大。 首先将batch分组,由于buf每次保存512个点,所以需要将b*n*3个点按照每组512个点进行分组 所以,首先我们需要使用`for (int k0=0;k0<n;k0+=Block)`,Block=512保证每次都会处理512个点, 比如第三次循环,k0 = 0+512+512=1024,说明前1024个点已经被处理,也就是处理了k0*3个数字了。 当已经处理了k0*3个数字后,接下来要继续处理512个点,但是剩下的点的数量不一定有512个,所以使用k1=min(n,k0+Block) 保证我们不会访问越界。 对于512个点,需要继续分组给grid中的block的线程进行处理,外层循环k0保证遍历了一个点云的所有点, 但是我们并不是只有一个patch,所以这里是用每个block处理一个patch, 所以对于一个线程,其处于第bno%32个block的threadIdx.x位置上,应该处理的是第bno%32个patch的512*3个数字中的 第threadIdx.x、threadIdx.x+blockDim.x、threadIdx.x+blockDim.x+blockDim.x……个数字, 如果bno小于batch_size,那么后面的patch怎么被处理到呢?实际上在最开始的for循环已经将batch进行了分组, 所以如果batch_size大于block_num,那么每次只会处理不多于block_num个patch, **/ // 一个grid中的所有块是逐个执行的吗?如果不是的话,每个块处理一个patch,每个patch都会取512个点,那么buf中的数据会冲突啊, // 我们应该保证buf中的数据来自于同一个patch的512个点。 // 注意,buf向量是__shared__的,也就是说仅仅是块被共享的,并不是全局共享的 __syncthreads(); // 一个块中的线程在此刻同步,保证buf矩阵都被赋值完成了 /** 为什么套路是查看当前线程的idx,这个线程就处理第idx个数据,接下来处理第idx+blockDim.x个数据? 这样可以保证线程之间不会同时访问同一个元素,所有线程的序号从0开始,blockDim.x-1结束,它们都加上blockDim.x之后, 序号从blockDim.x开始,2*blockDim.x-1结束可以一块一块地处理数据。 **/ /** cost矩阵的形状是(b,n,n),表示b个推理得到的patch中的n个点到对应gt中的patch的的n个点的距离。 其中(i,j,k)位置的值便是pred得到的第i个patch的第j个点和gt中的第i个patch第k个点的欧式距离。 所以对于一个线程,其块号为bno,块内线程序号为j,那么我们在上一个循环已经将第bno个patch的512个点存储到了buf[] 我们用j遍历gt中第bno个patch的所有点,对于每个点我们计算buf中512个点到这个点的距离, 并存储到cost矩阵中,位置为(bno,k,j),对应的位置为bno*n*n+k*n+j。 可是这里为什么使用的是cost[blockIdx.x*n*n+k*n+j],blockIdx.x的大小范围是0~32,如果batch大于这个数了怎么办? 
没关系,因为grid一次只能处理blockIdx.x对点云,总共有b组点云,分成很多组进行处理。 对于后面的组,前面的组计算的cost矩阵完全没用,直接覆盖就好了。 所以cost矩阵是过大了的,其实仅仅需要blockIdx.x*n*n就行了,但是cost设置为b*n*n非常保险, 如果blockIdx.x大于b,则后面的block是闲置的,仅前b个会忙碌,不会访问溢出。 **/ for (int j=threadIdx.x;j<n;j+=blockDim.x){ // j from 0 to 4096,处理一个patch中所有的点,一次循环里对坐标的三个数字操作 // bno*n*3表示依然是每个block处理一个patch,j*3表示第j个点的起始位置 float x2=xyz2[bno*n*3+j*3+0]; float y2=xyz2[bno*n*3+j*3+1]; float z2=xyz2[bno*n*3+j*3+2]; for (int k=k0;k<k1;k++){ // k0是当前要处理的512个点(或小于)在预测的patch的起始index,k1为终止位置 float x1=buf[(k-k0)*3+0]; float y1=buf[(k-k0)*3+1]; float z1=buf[(k-k0)*3+2]; float d=sqrtf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2)); cost[blockIdx.x*n*n+k*n+j]=d; } } __syncthreads(); // 512个点的距离到gt中所有点的距离全部计算好了 } // 这个for循环结束以后,cost矩阵就计算好了 //calculate the distacne if (threadIdx.x==0){ // 如果不是第一个线程,这两个变量岂不是无法初始化 // 并不是,这两个变量是__shared__的,会被块内所有线程共享,这里仅在第一个线程里初始化该变量,是防止重复初始化, // 后面再加一个同步指令就可以保证块中所有线程在同步时刻这两个变量都被初始化了,并且只被初始化了一次 // 观察到__shared__变量在赋值后通常都要同步一次,保证块内线程对该共享变量的操作都已经结束, // 否则一些先执行的线程继续走下面的逻辑,但是使用的共享变量一边被后面的访问,一遍被前面的修改,造成错误 qhead=0; qlen=n; } __syncthreads(); // 等待共享变量初始化完成 int loaded=0; // 寄存器变量,线程独享 float value9,value10,value11,value12,value13,value14,value15,value16; // 寄存器变量,线程独享 while (qlen){ // n个点执行n次while循环 int i=Queue[qhead]; // 当前循环要处理xyz1中的第i个点 // 寄存器变量,线程独享 int i2; // 寄存器变量,线程独享 if (qhead+1<n) // 如果i不是最后一个点的序号,则i2是i后面那个点的序号,如果i已经是最后一个点的序号了,i2则表示第一个点的序号 i2=Queue[qhead+1]; // 当前循环要处理xyz1中的第i2个点,处于i后面 else i2=Queue[0]; float best=1e38f,best2=1e38f; // 寄存器变量,线程独享 int bestj=0; // 寄存器变量,线程独享 if (n==blockDim.x*8){ // 如果n是块长度的8倍,4096/512=8,每个线程处理8个点,512个线程一次解决4096个点的运算 int j=threadIdx.x; float value1,value2,value3,value4,value5,value6,value7,value8; if (loaded){ // pricer,(4096,1),最开始使用0填充 value1=value9+pricer[j]; value2=value10+pricer[j+blockDim.x]; value3=value11+pricer[j+blockDim.x*2]; value4=value12+pricer[j+blockDim.x*3]; value5=value13+pricer[j+blockDim.x*4]; value6=value14+pricer[j+blockDim.x*5]; value7=value15+pricer[j+blockDim.x*6]; value8=value16+pricer[j+blockDim.x*7]; loaded=0; }else{ /** cost[blockIdx.x*n*n+i*n+j+blockDim.x * t]表示: xyz1中的第blockIdx.x个patch中的第i个点到xyz2中第blockIdx.x个patch中的第(j+blockDim.x * t)个点的距离 pricer(4096,1),初始化使用0填充, pricer[j+blockDim.x*t]表示产品的价格,也就是xyz1中的点云的权重 总共有32个块,每个块有512个线程,对于一个线程,它首先确定自己所在的块处理的是哪个点云(blockIdx.x*n*n), 然后确定自己要处理xyz1中的那个点(i*n), value1~8八个值分别表示这个点到达另外八个点的cost,这八个点的位置是这样计算的: 首先对于xyz1中的每个点,我们需要考虑它到xyz2中的对应patch的4096个点的cost, 我们将这4096个点分成8组,为什么是八组?因为一开始我们就判断了点数4096是blockDim.x的八倍, 这样每个线程需要计算八组中相同位置的8个值,记为value1~8 **/ value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x]; value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2]; value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3]; value5=cost[blockIdx.x*n*n+i*n+j+blockDim.x*4]+pricer[j+blockDim.x*4]; value6=cost[blockIdx.x*n*n+i*n+j+blockDim.x*5]+pricer[j+blockDim.x*5]; value7=cost[blockIdx.x*n*n+i*n+j+blockDim.x*6]+pricer[j+blockDim.x*6]; value8=cost[blockIdx.x*n*n+i*n+j+blockDim.x*7]+pricer[j+blockDim.x*7]; /** value9~16是xyz1第blockIdx.x个patch中的第i2个点到xyz2中第blockIdx.x个patch中的的8个点的cost 相同块的另一个线程执行时,是第i2个点到另外八个点的cost, 一个块内的所有线程可以将xyz1中第i2个点到xyz2中第blockIdx.x个patch中的所有点的cost遍历完 value9~16在本轮迭代中并没有使用,而是在下一次迭代中被使用 **/ value9=cost[blockIdx.x*n*n+i2*n+j]; value10=cost[blockIdx.x*n*n+i2*n+j+blockDim.x]; value11=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*2]; value12=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*3]; value13=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*4]; value14=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*5]; 
value15=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*6]; value16=cost[blockIdx.x*n*n+i2*n+j+blockDim.x*7]; // 注意:同一个线程对应的value1~8对应的是xyz1中第i个点到xyz2中的八个点的cost, // value9~16对应的是xyz1中第i2个点到xyz2中的同样八个点的cost loaded=qlen>1; // qlen用于遍历xyz1中的n个点,每遍历一次会减少,所以一开始loaded会是True,到最后一个点会是False } int vj,vj2,vj3,vj4; if (value1<value2){ vj=j; // value1小,取其对应点的index }else{ vj=j+blockDim.x; // value2小,取其对应点的index float t=value1; // 并且交换value1和value2,保证value1更小 value1=value2; value2=t; } if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; } if (value5<value6){ vj3=j+blockDim.x*4; }else{ vj3=j+blockDim.x*5; float t=value5; value5=value6; value6=t; } if (value7<value8){ vj4=j+blockDim.x*6; }else{ vj4=j+blockDim.x*7; float t=value7; value7=value8; value8=t; } // 到此为止,将value1~8中两两较小的值保存在奇数位置 if (value1<value3){ value2=fminf(value2,value3); // 1<2,3,4,3<4, 次小的在2和3之间,保存进value2 }else{ value2=fminf(value1,value4); // 3<1,2,4,1<2, 次小的在1和4之间,保存进value2 value1=value3; vj=vj2; } if (value5<value7){ value6=fminf(value6,value7); // 5<6,7,8,7<8, 次小的在6和7之间,保存进value6 }else{ value6=fminf(value5,value8); // 7<5,6,8,5<6, 次小的在5和8之间,保存进value6 value5=value7; vj3=vj4; } // 到此为止,将value1,3,5,7中两两较小的保存在value1和value5的位置,次小的保存在value2和value6中,下面比较这四个 if (value1<value5){ best=value1; bestj=vj; best2=fminf(value2,value5); // 1<2,5,6,5<6, 次小的在2和5之间,保存进best2 }else{ best2=fminf(value1,value6); // 5<1,2,6,1<2, 次小的在1和6之间,保存进best2 best=value5; bestj=vj3; } // 至此,八个数最小的保存在best中,次小的保存在best2中 }else if (n>=blockDim.x*4){ for (int j=threadIdx.x;j<n;j+=blockDim.x*4){ float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x]; float value3=cost[blockIdx.x*n*n+i*n+j+blockDim.x*2]+pricer[j+blockDim.x*2]; float value4=cost[blockIdx.x*n*n+i*n+j+blockDim.x*3]+pricer[j+blockDim.x*3]; int vj,vj2; if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; } if (value3<value4){ vj2=j+blockDim.x*2; }else{ vj2=j+blockDim.x*3; float t=value3; value3=value4; value4=t; } if (value1<value3){ value2=fminf(value2,value3); }else{ value2=fminf(value1,value4); value1=value3; vj=vj2; } if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; } } }else if (n>=blockDim.x*2){ for (int j=threadIdx.x;j<n;j+=blockDim.x*2){ float value1=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; float value2=cost[blockIdx.x*n*n+i*n+j+blockDim.x]+pricer[j+blockDim.x]; int vj; if (value1<value2){ vj=j; }else{ vj=j+blockDim.x; float t=value1; value1=value2; value2=t; } if (best<value1){ best2=fminf(best2,value1); }else{ best2=fminf(best,value2); best=value1; bestj=vj; } } }else{ // 对于xyz1中的第blockIdx.x个patch中的第i个点到xyz2中第blockIdx.x个patch中的所有点依次计算距离, // 使用best和best2记录最大和最小值。 // 下面这种for循环没有使用规约,一个块中的线程计算点云1中第i个点到点云2中所有点的最短距离, // 每个线程需要处理n/blockDim.x(+1)个点,每个线程就需要经过n/blockDim.x(+1)次循环, // 这种循环的缺点需要进一步分析 todo for (int j=threadIdx.x;j<n;j+=blockDim.x){ float value=cost[blockIdx.x*n*n+i*n+j]+pricer[j]; if (best<value){ best2=fminf(best2,value); }else{ best2=best; bestj=j; best=value; } } } // 这里不同线程负责计算不同部分的最小值,为什么不用同步? 
/** 由于上面在计算一个点到4096个点的距离的最小值时,分成了多个线程计算,32个线程都有着自己负责的数据的最小值 为了得到一个总的最小值,需要将这些最小值放在一起继续对比, 但是这些最小值都存放在各自线程的寄存器中,并不能直接共享, 所以这里要用线程束洗牌指令将不同的线程中的数据合并起来 **/ for (int i=16;i>0;i>>=1){ // i = 16,8,4,2,1 // 得到当前线程在warp中的编号(0~32)减去i的线程的 best、best2、bestj 变量的值,分别存在 b1、b2、bj 中 // 根据之后填充bests的逻辑来看,这个循环可以将一个warp中的所有线程中的最小值进行比较,并最终存放在0,32,64,……,480这些线程上 // 也就是每个warp的最后一个线程会和warp内的所有线程进行比较,得到这32个线程中的最小值 float b1=__shfl_down_sync(0xFFFFFFFF,best,i,32); float b2=__shfl_down_sync(0xFFFFFFFF,best2,i,32); int bj=__shfl_down_sync(0xFFFFFFFF,bestj,i,32); // 比较另一个线程中的最小值、次小值和当前线程中的最小值、次小值, if (best<b1){ // best<b1<b2, best<best2, 次小值在b1和best2中 best2=fminf(b1,best2); }else{ // b1<best<best2, b1<b2, 次小值在best和b2中 best=b1; best2=fminf(best,b2); bestj=bj; } } if ((threadIdx.x&31)==0){ // 符合条件的是0,32,64,……,480,可以直接用%的 /** bests的形状是(32, 3),我们有512个线程,将512个线程的结果分32组存放在bests中 线程序号为threadIdx.x的线程结果存放在bests[threadIdx.x % 32][:]的三个位置上 注意,并不会冲突,因为符合条件的线程id为0,32,64,……,480,它们分别存储在bests的0,1,2,……,15位置上 **/ bests[threadIdx.x>>5][0]=best; bests[threadIdx.x>>5][1]=best2; *(int*)&bests[threadIdx.x>>5][2]=bestj; // 这是什么语法 } __syncthreads(); // 保证bests矩阵赋值完成 int nn=blockDim.x>>5; // 512>>5=16 /** 至此,对于xyz中的任意一个点,我们得到了它到xyz中对应patch的4096个点的距离中最小的16个,分别存在bests的前十六行。 接下来,很自然,我们要根据这十六个距离得到这个点到4096个点的最近距离。 根据后面的逻辑,这里会将最优结果保存在第0个线程中 **/ if (threadIdx.x<nn){ best=bests[threadIdx.x][0]; best2=bests[threadIdx.x][1]; bestj=*(int*)&bests[threadIdx.x][2]; for (int i=nn>>1;i>0;i>>=1){ // i = 8,4,2,1 float b1=__shfl_down_sync(0xFFFFFFFF,best,i,32); float b2=__shfl_down_sync(0xFFFFFFFF,best2,i,32); int bj=__shfl_down_sync(0xFFFFFFFF,bestj,i,32); if (best<b1){ best2=fminf(b1,best2); }else{ best=b1; best2=fminf(best,b2); bestj=bj; } } } if (threadIdx.x==0){ float delta=best2-best+tolerance; // 这个block负责的xyz1中的这个点到xyz2中的4096个点的最近距离、次近距离 qhead++; qlen--; // qlen = 4096,4095,……,1 if (qhead>=n) // 如果qhead遍历到xyz1对应patch的最后一点,再从第一个点重新遍历 qhead-=n; int old=matchrbuf[bestj]; // (4096,1),目前谁到第二个点云中第bestj个点最近 pricer[bestj]+=delta; // 一个点的价格等于这个点到达最近点和次近点的距离差 /** 为什么要这么定义price? 我的理解是,一个点的价格为最近点和次近点的距离差, 这个距离越大,说明如果把这个点指派到非最近点会造成的结果更差。 为了整体均衡,我们需要指派一些点不和它们的最近点匹配, 但是如果能够保证这些点是距离次近点没那么远的点, 而那些距离次近点远很多的点,最好不要动它们 **/ cnt++; // while循环的次数 if (old!=-1){ // matchrbuf是用-1填充的,所以如果之前没有那个点到这个点最近,那么old==1 int ql=qlen; int tail=qhead+ql; // tail = qlen=ql+1; if (tail>=n) tail-=n; Queue[tail]=old; } if (cnt==(40*n)){ if (tolerance==1.0) qlen=0; tolerance=fminf(1.0,tolerance*100); cnt=0; } } __syncthreads(); if (threadIdx.x==0){ // 赋值操作仅需要一个线程来完成就行,避免多个线程同时写一块内存 matchrbuf[bestj]=i; // 第二个点云中bestj个点与第一个点云中第i个点配对 } } __syncthreads(); // matchrbuf是__shared__,需要在这里同步块内线程 for (int j=threadIdx.x;j<n;j+=blockDim.x) // 使用matchrbuf中每个点对应的最近点的序号更新matchr // matchr表示第二个点云中的每个点和第一个点云中每个点的配对信息 matchr[bno*n+j]=matchrbuf[j]; for (int j=threadIdx.x;j<n;j+=blockDim.x) // 更新matchl,保证和matchr中的配对情况一致 matchl[bno*n+matchrbuf[j]]=j; __syncthreads(); // 到此为止,点的配对情况计算完成,分别保存在matchr和matchl中,我们只需要matchl就行 } } void AuctionMatchLauncher(int b,int n,const float * xyz1,const float * xyz2,int * matchl,int * matchr,float * cost){ AuctionMatchKernel<<<32,512>>>(b,n,xyz1,xyz2,matchl,matchr,cost); // 一个grid包含32个block,每个block包含512个线程,均为线性排布 }
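The reduction that the comments above walk through happens in two stages: a shuffle-down tree inside each warp, then a second pass over the per-warp results parked in bests[][]. A stripped-down sketch of the warp-level stage, under the same assumptions the kernel already makes (full 32-lane warps, the *_sync shuffle intrinsics of CUDA 9+), keeping only the minimum rather than the best/best2/bestj triple:

// Sketch only: warp-wide minimum via __shfl_down_sync. After the loop,
// lane 0 of the warp holds the minimum over all 32 lanes.
__device__ inline float warpMin(float v)
{
    for (int offset = 16; offset > 0; offset >>= 1) {
        float other = __shfl_down_sync(0xFFFFFFFF, v, offset, 32);
        v = fminf(v, other);
    }
    return v;
}

AuctionMatchKernel carries two extra values through the same loop, the second-smallest cost (best2) and the index of the smallest (bestj), which is why each step compares the incoming b1/b2/bj triple against the local values instead of calling fminf once.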
ea0d7c7cdb292404052e990a094a0285f4a5b70a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * * * This sample illustrates the usage of CUDA events for both GPU timing and * overlapping CPU and GPU execution. Events are insterted into a stream * of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can * perform computations while GPU is executing (including DMA memcopies * between the host and device). CPU can query CUDA events to determine * whether GPU has completed tasks. * */ #include <stdio.h> #include <cutil_inline.h> #include <shrQATest.h> __global__ void increment_kernel(int *g_data, int inc_value) { int idx = blockIdx.x * blockDim.x + threadIdx.x; g_data[idx] = g_data[idx] + inc_value; } int correct_output(int *data, const int n, const int x) { for(int i = 0; i < n; i++) if(data[i] != x) return 0; return 1; } int main(int argc, char *argv[]) { int devID; hipDeviceProp_t deviceProps; shrQAStart(argc, argv); if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { devID = cutilDeviceInit(argc, argv); if (devID < 0) { printf("No CUDA Capable devices found, exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_WAIVED); } } else { devID = cutGetMaxGflopsDeviceId(); hipSetDevice( devID ); } // get device name cutilSafeCall(hipGetDeviceProperties(&deviceProps, devID)); printf("CUDA device [%s]\n", deviceProps.name); int n = 16 * 1024 * 1024; int nbytes = n * sizeof(int); int value = 26; // allocate host memory int *a = 0; cutilSafeCall( hipHostMalloc((void**)&a, nbytes) ); memset(a, 0, nbytes); // allocate device memory int *d_a=0; cutilSafeCall( hipMalloc((void**)&d_a, nbytes) ); cutilSafeCall( hipMemset(d_a, 255, nbytes) ); // set kernel launch configuration dim3 threads = dim3(512, 1); dim3 blocks = dim3(n / threads.x, 1); // create cuda event handles hipEvent_t start, stop; cutilSafeCall( hipEventCreate(&start) ); cutilSafeCall( hipEventCreate(&stop) ); unsigned int timer; cutilCheckError( cutCreateTimer(&timer) ); cutilCheckError( cutResetTimer(timer) ); cutilSafeCall( cutilDeviceSynchronize() ); float gpu_time = 0.0f; // asynchronously issue work to the GPU (all to stream 0) cutilCheckError( cutStartTimer(timer) ); hipEventRecord(start, 0); hipMemcpyAsync(d_a, a, nbytes, hipMemcpyHostToDevice, 0); hipLaunchKernelGGL(( increment_kernel), dim3(blocks), dim3(threads), 0, 0, d_a, value); hipMemcpyAsync(a, d_a, nbytes, hipMemcpyDeviceToHost, 0); hipEventRecord(stop, 0); cutilCheckError( cutStopTimer(timer) ); // have CPU do some work while waiting for stage 1 to finish unsigned long int counter=0; while( hipEventQuery(stop) == hipErrorNotReady ) { counter++; } cutilSafeCall( hipEventElapsedTime(&gpu_time, start, stop) ); // print the cpu and gpu times printf("time spent executing by the GPU: %.2f\n", gpu_time); printf("time spent by CPU in CUDA calls: %.2f\n", cutGetTimerValue(timer) ); printf("CPU executed %d iterations while waiting for GPU to finish\n", counter); // check the output for correctness bool bFinalResults = (bool)correct_output(a, n, value); // release resources cutilSafeCall( hipEventDestroy(start) ); cutilSafeCall( hipEventDestroy(stop) ); cutilSafeCall( hipHostFree(a) 
); cutilSafeCall( hipFree(d_a) ); cutilDeviceReset(); shrQAFinishExit(argc, (const char **)argv, (bFinalResults ? QA_PASSED : QA_FAILED)); }
ea0d7c7cdb292404052e990a094a0285f4a5b70a.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * * * This sample illustrates the usage of CUDA events for both GPU timing and * overlapping CPU and GPU execution. Events are insterted into a stream * of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can * perform computations while GPU is executing (including DMA memcopies * between the host and device). CPU can query CUDA events to determine * whether GPU has completed tasks. * */ #include <stdio.h> #include <cutil_inline.h> #include <shrQATest.h> __global__ void increment_kernel(int *g_data, int inc_value) { int idx = blockIdx.x * blockDim.x + threadIdx.x; g_data[idx] = g_data[idx] + inc_value; } int correct_output(int *data, const int n, const int x) { for(int i = 0; i < n; i++) if(data[i] != x) return 0; return 1; } int main(int argc, char *argv[]) { int devID; cudaDeviceProp deviceProps; shrQAStart(argc, argv); if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { devID = cutilDeviceInit(argc, argv); if (devID < 0) { printf("No CUDA Capable devices found, exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_WAIVED); } } else { devID = cutGetMaxGflopsDeviceId(); cudaSetDevice( devID ); } // get device name cutilSafeCall(cudaGetDeviceProperties(&deviceProps, devID)); printf("CUDA device [%s]\n", deviceProps.name); int n = 16 * 1024 * 1024; int nbytes = n * sizeof(int); int value = 26; // allocate host memory int *a = 0; cutilSafeCall( cudaMallocHost((void**)&a, nbytes) ); memset(a, 0, nbytes); // allocate device memory int *d_a=0; cutilSafeCall( cudaMalloc((void**)&d_a, nbytes) ); cutilSafeCall( cudaMemset(d_a, 255, nbytes) ); // set kernel launch configuration dim3 threads = dim3(512, 1); dim3 blocks = dim3(n / threads.x, 1); // create cuda event handles cudaEvent_t start, stop; cutilSafeCall( cudaEventCreate(&start) ); cutilSafeCall( cudaEventCreate(&stop) ); unsigned int timer; cutilCheckError( cutCreateTimer(&timer) ); cutilCheckError( cutResetTimer(timer) ); cutilSafeCall( cutilDeviceSynchronize() ); float gpu_time = 0.0f; // asynchronously issue work to the GPU (all to stream 0) cutilCheckError( cutStartTimer(timer) ); cudaEventRecord(start, 0); cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0); increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value); cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0); cudaEventRecord(stop, 0); cutilCheckError( cutStopTimer(timer) ); // have CPU do some work while waiting for stage 1 to finish unsigned long int counter=0; while( cudaEventQuery(stop) == cudaErrorNotReady ) { counter++; } cutilSafeCall( cudaEventElapsedTime(&gpu_time, start, stop) ); // print the cpu and gpu times printf("time spent executing by the GPU: %.2f\n", gpu_time); printf("time spent by CPU in CUDA calls: %.2f\n", cutGetTimerValue(timer) ); printf("CPU executed %d iterations while waiting for GPU to finish\n", counter); // check the output for correctness bool bFinalResults = (bool)correct_output(a, n, value); // release resources cutilSafeCall( cudaEventDestroy(start) ); cutilSafeCall( cudaEventDestroy(stop) ); cutilSafeCall( cudaFreeHost(a) ); cutilSafeCall( cudaFree(d_a) ); cutilDeviceReset(); shrQAFinishExit(argc, (const char **)argv, 
(bFinalResults ? QA_PASSED : QA_FAILED)); }
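The sample leans on the legacy cutil/shrQA helpers for argument parsing and CPU-side timing, but the overlap-and-measure pattern it demonstrates needs only the runtime API. A reduced sketch of that pattern, reusing a, d_a, nbytes, blocks, threads and value as set up above and dropping the cutil wrappers and error checks for brevity:

// Sketch only: event-based GPU timing with CPU/GPU overlap, runtime API only.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);                                  // marker before the async work on stream 0
cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
increment_kernel<<<blocks, threads>>>(d_a, value);
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);                                   // marker after the async work

unsigned long spins = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady) ++spins;  // CPU stays free to do other work here

float gpu_ms = 0.0f;
cudaEventElapsedTime(&gpu_ms, start, stop);                 // milliseconds between the two markers
cudaEventDestroy(start);
cudaEventDestroy(stop);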
4091f029d3bc5952f301552c03e9463fc7930e90.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <sys/types.h> #include <unistd.h> #include <stdio.h> #include "nccl.h" #include "mpi.h" #include "test_utilities.h" #define SIZE 128 #define NITERS 1 int main(int argc, char *argv[]) { ncclUniqueId commId; int size, rank; ncclResult_t ret; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (argc < size) { if (rank == 0) printf("Usage : %s <GPU list per rank>\n", argv[0]); exit(1); } int gpu = atoi(argv[rank+1]); // We have to set our device before NCCL init CUDACHECK(hipSetDevice(gpu)); MPI_Barrier(MPI_COMM_WORLD); // NCCL Communicator creation ncclComm_t comm; NCCLCHECK(ncclGetUniqueId(&commId)); MPI_Bcast(&commId, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD); ret = ncclCommInitRank(&comm, size, commId, rank); if (ret != ncclSuccess) { printf("NCCL Init failed (%d) '%s'\n", ret, ncclGetErrorString(ret)); exit(1); } // CUDA stream creation hipStream_t stream; CUDACHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); // Initialize input values int *dptr; CUDACHECK(hipMalloc(&dptr, SIZE*2*sizeof(int))); int *val = (int*) malloc(SIZE*sizeof(int)); for (int v=0; v<SIZE; v++) { val[v] = rank + 1; } CUDACHECK(hipMemcpy(dptr, val, SIZE*sizeof(int), hipMemcpyHostToDevice)); // Compute final value int ref = size*(size+1)/2; // Run allreduce int errors = 0; for (int i=0; i<NITERS; i++) { NCCLCHECK(ncclAllReduce((const void*)dptr, (void*)(dptr+SIZE), SIZE, ncclInt, ncclSum, comm, stream)); } // Check results CUDACHECK(hipStreamSynchronize(stream)); CUDACHECK(hipMemcpy(val, (dptr+SIZE), SIZE*sizeof(int), hipMemcpyDeviceToHost)); for (int v=0; v<SIZE; v++) { if (val[v] != ref) { errors++; printf("[%d] Error at %d : got %d instead of %d\n", rank, v, val[v], ref); } } CUDACHECK(hipFree(dptr)); MPI_Allreduce(MPI_IN_PLACE, &errors, 1, 
MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD); if (rank == 0) { if (errors) printf("%d errors. Test FAILED.\n", errors); else printf("Test PASSED.\n"); } MPI_Finalize(); ncclCommDestroy(comm); return errors ? 1 : 0; }
4091f029d3bc5952f301552c03e9463fc7930e90.cu
/************************************************************************* * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ************************************************************************/ #include <sys/types.h> #include <unistd.h> #include <stdio.h> #include "nccl.h" #include "mpi.h" #include "test_utilities.h" #define SIZE 128 #define NITERS 1 int main(int argc, char *argv[]) { ncclUniqueId commId; int size, rank; ncclResult_t ret; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); if (argc < size) { if (rank == 0) printf("Usage : %s <GPU list per rank>\n", argv[0]); exit(1); } int gpu = atoi(argv[rank+1]); // We have to set our device before NCCL init CUDACHECK(cudaSetDevice(gpu)); MPI_Barrier(MPI_COMM_WORLD); // NCCL Communicator creation ncclComm_t comm; NCCLCHECK(ncclGetUniqueId(&commId)); MPI_Bcast(&commId, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD); ret = ncclCommInitRank(&comm, size, commId, rank); if (ret != ncclSuccess) { printf("NCCL Init failed (%d) '%s'\n", ret, ncclGetErrorString(ret)); exit(1); } // CUDA stream creation cudaStream_t stream; CUDACHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); // Initialize input values int *dptr; CUDACHECK(cudaMalloc(&dptr, SIZE*2*sizeof(int))); int *val = (int*) malloc(SIZE*sizeof(int)); for (int v=0; v<SIZE; v++) { val[v] = rank + 1; } CUDACHECK(cudaMemcpy(dptr, val, SIZE*sizeof(int), cudaMemcpyHostToDevice)); // Compute final value int ref = size*(size+1)/2; // Run allreduce int errors = 0; for (int i=0; i<NITERS; i++) { NCCLCHECK(ncclAllReduce((const void*)dptr, (void*)(dptr+SIZE), SIZE, ncclInt, ncclSum, comm, stream)); } // Check results CUDACHECK(cudaStreamSynchronize(stream)); CUDACHECK(cudaMemcpy(val, (dptr+SIZE), SIZE*sizeof(int), cudaMemcpyDeviceToHost)); for (int v=0; v<SIZE; v++) { if (val[v] != ref) { errors++; printf("[%d] Error at %d : got %d instead of %d\n", rank, v, val[v], ref); } } CUDACHECK(cudaFree(dptr)); MPI_Allreduce(MPI_IN_PLACE, &errors, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD); if (rank == 
0) { if (errors) printf("%d errors. Test FAILED.\n", errors); else printf("Test PASSED.\n"); } MPI_Finalize(); ncclCommDestroy(comm); return errors ? 1 : 0; }
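The pass/fail criterion above follows directly from the initialization: rank r fills its send buffer with r + 1, so ncclSum across size ranks produces 1 + 2 + ... + size = size*(size+1)/2 in every element, which is exactly the ref value. With 4 ranks, for instance, the contributions are 1, 2, 3 and 4, and every element of the reduced buffer should read 10.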
0ccfdd1a34c309a016529119dc4460b31ab82ee9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions mixed zc -> ds */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ // adds x += r (including conversion to double) --and-- // copies w = b // each thread does one index, x[i] and w[i] __global__ void zcaxpycp_kernel( int m, magmaFloatComplex *r, magmaDoubleComplex *x, const magmaDoubleComplex *b, magmaDoubleComplex *w ) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_Z_ADD( x[i], MAGMA_Z_MAKE( MAGMA_Z_REAL( r[i] ), MAGMA_Z_IMAG( r[i] ) ) ); w[i] = b[i]; } } /***************************************************************************//** adds x += r (including conversion to double) --and-- copies w = b *******************************************************************************/ extern "C" void magmablas_zcaxpycp( magma_int_t m, magmaFloatComplex_ptr r, magmaDoubleComplex_ptr x, magmaDoubleComplex_const_ptr b, magmaDoubleComplex_ptr w, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); hipLaunchKernelGGL(( zcaxpycp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, r, x, b, w ); }
0ccfdd1a34c309a016529119dc4460b31ab82ee9.cu
/*
    -- MAGMA (version 2.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2019

       @precisions mixed zc -> ds
*/
#include "magma_internal.h"

#define NB 64


/******************************************************************************/
// adds   x += r (including conversion to double)  --and--
// copies w = b
// each thread does one index, x[i] and w[i]
__global__ void
zcaxpycp_kernel(
    int m,
    magmaFloatComplex *r,
    magmaDoubleComplex *x,
    const magmaDoubleComplex *b,
    magmaDoubleComplex *w )
{
    const int i = threadIdx.x + blockIdx.x*NB;
    if ( i < m ) {
        x[i] = MAGMA_Z_ADD( x[i], MAGMA_Z_MAKE( MAGMA_Z_REAL( r[i] ),
                                                MAGMA_Z_IMAG( r[i] ) ) );
        w[i] = b[i];
    }
}


/***************************************************************************//**
    adds   x += r (including conversion to double)  --and--
    copies w = b
*******************************************************************************/
extern "C" void
magmablas_zcaxpycp(
    magma_int_t m,
    magmaFloatComplex_ptr r,
    magmaDoubleComplex_ptr x,
    magmaDoubleComplex_const_ptr b,
    magmaDoubleComplex_ptr w,
    magma_queue_t queue )
{
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    zcaxpycp_kernel <<< grid, threads, 0, queue->cuda_stream() >>>
        ( m, r, x, b, w );
}
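For this pair, the only substantive differences between the .hip and .cu versions are the added "hip/hip_runtime.h" include and the kernel-launch syntax; the kernel body and host wrapper are otherwise identical. The sketch below illustrates that translation with a trivial placeholder kernel (dummy_kernel and launch_both_ways are illustrative names, not part of MAGMA); the HIP form emitted by hipify is shown as a comment so the snippet compiles as plain CUDA.

__global__ void dummy_kernel(int n, float *x) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) x[i] += 1.0f;   // placeholder body
}

void launch_both_ways(int n, float *d_x, cudaStream_t stream) {
    dim3 threads(64), grid((n + 63) / 64);

    // CUDA triple-chevron launch, as in the .cu file:
    dummy_kernel<<<grid, threads, 0, stream>>>(n, d_x);

    // Equivalent HIP form emitted by hipify, as in the .hip file:
    // hipLaunchKernelGGL(dummy_kernel, grid, threads, 0, stream, n, d_x);
}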
ef691f65142406b072e1a9b13f56df6008e2c507.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! @file * \brief Descriptions and declarations for structures used in GPU * * <pre> * -- Distributed SuperLU routine (version 7.2) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley, * Georgia Institute of Technology, Oak Ridge National Laboratory * * Last update: November 14, 2021 remove dependence on CUB/scan * </pre> */ //#define GPU_DEBUG #include "superlu_defs.h" #undef Reduce //#include <thrust/system/gpu/detail/hipcub/hipcub.hpp> #include "slustruct_gpu.h" #ifdef HAVE_CUDA #include "superlu_gpu_utils.hip" #elif defined(HAVE_HIP) #include "superlu_gpu_utils.hip.cpp" #endif //extern "C" { // void cblas_daxpy(const int N, const double alpha, const double *X, // const int incX, double *Y, const int incY); //} // gpublasStatus_t checkGPUblas(gpublasStatus_t result) // { // #if defined(DEBUG) || defined(_DEBUG) // if (result != GPUBLAS_STATUS_SUCCESS) // { // fprintf(stderr, "CUDA Blas Runtime Error: %s\n", gpublasGetErrorString(result)); // assert(result == GPUBLAS_STATUS_SUCCESS); // } // #endif // return result; // } // #define UNIT_STRIDE #if 0 ////////// this routine is not used anymore __device__ inline void device_scatter_l (int_t thread_id, int_t nsupc, int_t temp_nbrow, int_t *usub, int_t iukp, int_t klst, float *nzval, int_t ldv, float *tempv, int_t nbrow, // int_t *indirect2_thread int *indirect2_thread ) { int_t segsize, jj; for (jj = 0; jj < nsupc; ++jj) { segsize = klst - usub[iukp + jj]; if (segsize) { if (thread_id < temp_nbrow) { #ifndef UNIT_STRIDE nzval[indirect2_thread[thread_id]] -= tempv[thread_id]; #else nzval[thread_id] -= tempv[thread_id]; /*making access unit strided*/ #endif } tempv += nbrow; } nzval += ldv; } } #endif ///////////// not used //#define THREAD_BLOCK_SIZE 256 /* Sherry: was 192. 
should be <= MAX_SUPER_SIZE */ __device__ inline void sdevice_scatter_l_2D (int thread_id, int nsupc, int temp_nbrow, int_t *usub, int iukp, int_t klst, float *nzval, int ldv, const float *tempv, int nbrow, int *indirect2_thread, int nnz_cols, int ColPerBlock, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #define UNROLL_ITER 8 #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = ldv * IndirectJ3[col] + indirect2_thread[thread_id_x]; nzval[i] -= tempv[nbrow * col + thread_id_x]; } } } /* Sherry: this routine is not used */ #if 0 ////////////////////////////////////////////// __global__ void cub_scan_test(void) { int thread_id = threadIdx.x; typedef hipcub::BlockScan<int, MAX_SUPER_SIZE > BlockScan; /*1D int data type*/ __shared__ typename BlockScan::TempStorage temp_storage; /*storage temp*/ __shared__ int IndirectJ1[MAX_SUPER_SIZE]; __shared__ int IndirectJ2[MAX_SUPER_SIZE]; if (thread_id < MAX_SUPER_SIZE) { IndirectJ1[thread_id] = (thread_id + 1) % 2; } __syncthreads(); if (thread_id < MAX_SUPER_SIZE) BlockScan(temp_storage).InclusiveSum (IndirectJ1[thread_id], IndirectJ2[thread_id]); if (thread_id < MAX_SUPER_SIZE) printf("%d %d\n", thread_id, IndirectJ2[thread_id]); } #endif /////////////////////////////////// not used __device__ inline void device_scatter_u_2D (int thread_id, int temp_nbrow, int nsupc, float * ucol, int_t * usub, int iukp, int_t ilst, int_t klst, int_t * index, int iuip_lib, float * tempv, int nbrow, int *indirect, int nnz_cols, int ColPerBlock, int *IndirectJ1, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { /* 1D threads are logically arranged in 2D shape. */ int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = IndirectJ1[IndirectJ3[col]]-ilst + indirect[thread_id_x]; ucol[i] -= tempv[nbrow * col + thread_id_x]; } } } __global__ void Scatter_GPU_kernel( int_t streamId, int_t ii_st, int_t ii_end, int_t jj_st, int_t jj_end, /* defines rectangular Schur block to be scatter */ int_t klst, int_t jj0, /* 0 on entry */ int_t nrows, int_t ldt, int_t npcol, int_t nprow, sLUstruct_gpu_t * A_gpu) { /* initializing pointers */ int_t *xsup = A_gpu->xsup; int_t *UrowindPtr = A_gpu->UrowindPtr; int_t *UrowindVec = A_gpu->UrowindVec; int_t *UnzvalPtr = A_gpu->UnzvalPtr; float *UnzvalVec = A_gpu->UnzvalVec; int_t *LrowindPtr = A_gpu->LrowindPtr; int_t *LrowindVec = A_gpu->LrowindVec; int_t *LnzvalPtr = A_gpu->LnzvalPtr; float *LnzvalVec = A_gpu->LnzvalVec; float *bigV = A_gpu->scubufs[streamId].bigV; local_l_blk_info_t *local_l_blk_infoVec = A_gpu->local_l_blk_infoVec; local_u_blk_info_t *local_u_blk_infoVec = A_gpu->local_u_blk_infoVec; int_t *local_l_blk_infoPtr = A_gpu->local_l_blk_infoPtr; int_t *local_u_blk_infoPtr = A_gpu->local_u_blk_infoPtr; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info; int_t *lsub = A_gpu->scubufs[streamId].lsub; int_t *usub = A_gpu->scubufs[streamId].usub; /* thread block assignment: this thread block is assigned to block (lb, j) in 2D grid */ int lb = blockIdx.x + ii_st; int j = blockIdx.y + jj_st; extern __shared__ int s[]; int* indirect_lptr = s; /* row-wise */ int* indirect2_thread= (int*) &indirect_lptr[ldt]; /* row-wise */ int* IndirectJ1= (int*) &indirect2_thread[ldt]; /* 
column-wise */ int* IndirectJ3= (int*) &IndirectJ1[ldt]; /* column-wise */ //int THREAD_BLOCK_SIZE =ldt; int* pfxStorage = (int*) &IndirectJ3[ldt]; int thread_id = threadIdx.x; int iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize (jb); int ljb = jb / npcol; float *tempv1; if (jj_st == jj0) { tempv1 = (j == jj_st) ? bigV : bigV + Ublock_info[j - 1].full_u_cols * nrows; } else { tempv1 = (j == jj_st) ? bigV : bigV + (Ublock_info[j - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols) * nrows; } /* # of nonzero columns in block j */ int nnz_cols = (j == 0) ? Ublock_info[j].full_u_cols : (Ublock_info[j].full_u_cols - Ublock_info[j - 1].full_u_cols); int cum_ncol = (j == 0) ? 0 : Ublock_info[j - 1].full_u_cols; int lptr = Remain_info[lb].lptr; int ib = Remain_info[lb].ib; int temp_nbrow = lsub[lptr + 1]; /* number of rows in the current L block */ lptr += LB_DESCRIPTOR; int_t cum_nrow; if (ii_st == 0) { cum_nrow = (lb == 0 ? 0 : Remain_info[lb - 1].FullRow); } else { cum_nrow = (lb == 0 ? 0 : Remain_info[lb - 1].FullRow - Remain_info[ii_st - 1].FullRow); } tempv1 += cum_nrow; if (ib < jb) /*scatter U code */ { int ilst = FstBlockC (ib + 1); int lib = ib / nprow; /* local index of row block ib */ int_t *index = &UrowindVec[UrowindPtr[lib]]; int num_u_blocks = index[0]; int ljb = (jb) / npcol; /* local index of column block jb */ /* Each thread is responsible for one block column */ __shared__ int ljb_ind; /*do a search ljb_ind at local row lib*/ int blks_per_threads = CEILING(num_u_blocks, blockDim.x); // printf("blockDim.x =%d \n", blockDim.x); for (int i = 0; i < blks_per_threads; ++i) /* each thread is assigned a chunk of consecutive U blocks to search */ { /* only one thread finds the block index matching ljb */ if (thread_id * blks_per_threads + i < num_u_blocks && local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + thread_id * blks_per_threads + i ].ljb == ljb) { ljb_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int iuip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].iuip; int ruip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].ruip; iuip_lib += UB_DESCRIPTOR; float *Unzval_lib = &UnzvalVec[UnzvalPtr[lib]]; float *ucol = &Unzval_lib[ruip_lib]; if (thread_id < temp_nbrow) /* row-wise */ { /* cyclically map each thread to a row */ indirect_lptr[thread_id] = (int) lsub[lptr + thread_id]; } /* column-wise: each thread is assigned one column */ if (thread_id < nnz_cols) IndirectJ3[thread_id] = A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ __syncthreads(); /* threads are divided into multiple columns */ int ColPerBlock = blockDim.x / temp_nbrow; // if (thread_id < blockDim.x) // IndirectJ1[thread_id] = 0; if (thread_id < ldt) IndirectJ1[thread_id] = 0; if (thread_id < blockDim.x) { if (thread_id < nsupc) { /* fstnz subscript of each column in the block */ IndirectJ1[thread_id] = -index[iuip_lib + thread_id] + ilst; } } /* perform an inclusive block-wide prefix sum among all threads */ __syncthreads(); incScan(IndirectJ1, pfxStorage, nsupc); __syncthreads(); device_scatter_u_2D ( thread_id, temp_nbrow, nsupc, ucol, usub, iukp, ilst, klst, index, iuip_lib, tempv1, nrows, indirect_lptr, nnz_cols, ColPerBlock, IndirectJ1, IndirectJ3 ); } else /* ib >= jb, scatter L code */ { int rel; float *nzval; int_t *index = &LrowindVec[LrowindPtr[ljb]]; int num_l_blocks = index[0]; int ldv = index[1]; int fnz = FstBlockC (ib); 
int lib = ib / nprow; __shared__ int lib_ind; /*do a search lib_ind for lib*/ int blks_per_threads = CEILING(num_l_blocks, blockDim.x); for (int i = 0; i < blks_per_threads; ++i) { if (thread_id * blks_per_threads + i < num_l_blocks && local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + thread_id * blks_per_threads + i ].lib == lib) { lib_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int lptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].lptrj; int luptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].luptrj; lptrj += LB_DESCRIPTOR; int dest_nbrow = index[lptrj - 1]; if (thread_id < dest_nbrow) { rel = index[lptrj + thread_id] - fnz; indirect_lptr[rel] = thread_id; } __syncthreads(); /* can be precalculated */ if (thread_id < temp_nbrow) { rel = lsub[lptr + thread_id] - fnz; indirect2_thread[thread_id] = indirect_lptr[rel]; } if (thread_id < nnz_cols) IndirectJ3[thread_id] = (int) A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; __syncthreads(); int ColPerBlock = blockDim.x / temp_nbrow; nzval = &LnzvalVec[LnzvalPtr[ljb]] + luptrj; sdevice_scatter_l_2D( thread_id, nsupc, temp_nbrow, usub, iukp, klst, nzval, ldv, tempv1, nrows, indirect2_thread, nnz_cols, ColPerBlock, IndirectJ3); } /* end else ib >= jb */ } /* end Scatter_GPU_kernel */ #define GPU_2D_SCHUDT /* Not used */ int sSchurCompUpdate_GPU( int_t streamId, int_t jj_cpu, /* 0 on entry, pointing to the start of Phi part */ int_t nub, /* jj_cpu on entry, pointing to the end of the Phi part */ int_t klst, int_t knsupc, int_t Rnbrow, int_t RemainBlk, int_t Remain_lbuf_send_size, int_t bigu_send_size, int_t ldu, int_t mcb, /* num_u_blks_hi */ int_t buffer_size, int_t lsub_len, int_t usub_len, int_t ldt, int_t k0, ssluGPU_t *sluGPU, gridinfo_t *grid ) { int SCATTER_THREAD_BLOCK_SIZE=512; sLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; sLUstruct_gpu_t * dA_gpu = sluGPU->dA_gpu; int_t nprow = grid->nprow; int_t npcol = grid->npcol; gpuStream_t FunCallStream = sluGPU->funCallStreams[streamId]; gpublasHandle_t gpublas_handle0 = sluGPU->gpublasHandles[streamId]; int_t * lsub = A_gpu->scubufs[streamId].lsub_buf; int_t * usub = A_gpu->scubufs[streamId].usub_buf; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info_host; float * Remain_L_buff = A_gpu->scubufs[streamId].Remain_L_buff_host; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info_host; float * bigU = A_gpu->scubufs[streamId].bigU_host; A_gpu->isOffloaded[k0] = 1; /* start by sending data to */ int_t *xsup = A_gpu->xsup_host; int_t col_back = (jj_cpu == 0) ? 0 : Ublock_info[jj_cpu - 1].full_u_cols; // if(nub<1) return; int_t ncols = Ublock_info[nub - 1].full_u_cols - col_back; /* Sherry: can get max_super_size from sp_ienv(3) */ int_t indirectJ1[MAX_SUPER_SIZE]; // 0 indicates an empry segment int_t indirectJ2[MAX_SUPER_SIZE]; // # of nonzero segments so far int_t indirectJ3[MAX_SUPER_SIZE]; /* indirectJ3[j] == k means the j-th nonzero segment points to column k in this supernode */ /* calculate usub_indirect */ for (int jj = jj_cpu; jj < nub; ++jj) { int_t iukp = Ublock_info[jj].iukp; int_t jb = Ublock_info[jj].jb; int_t nsupc = SuperSize (jb); int_t addr = (jj == 0) ? 0 : Ublock_info[jj - 1].full_u_cols - col_back; for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ1[kk] = 0; } for (int_t kk = 0; kk < nsupc; ++kk) { indirectJ1[kk] = ((klst - usub[iukp + kk]) == 0) ? 
0 : 1; } /*prefix sum - indicates # of nonzero segments up to column kk */ indirectJ2[0] = indirectJ1[0]; for (int_t kk = 1; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ2[kk] = indirectJ2[kk - 1] + indirectJ1[kk]; } /* total number of nonzero segments in this supernode */ int nnz_col = indirectJ2[nsupc - 1]; // old: MAX_SUPER_SIZE /* compactation */ for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { if (indirectJ1[kk]) /* kk is a nonzero segment */ { /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ indirectJ3[indirectJ2[kk] - 1] = kk; } } for (int i = 0; i < nnz_col; ++i) { /* addr == total # of full columns before current block jj */ A_gpu->scubufs[streamId].usub_IndirectJ3_host[addr + i] = indirectJ3[i]; } } /* end for jj ... calculate usub_indirect */ //printf("sSchurCompUpdate_GPU[3]: jj_cpu %d, nub %d\n", jj_cpu, nub); fflush(stdout); /*sizeof RemainLbuf = Rnbuf*knsupc */ double tTmp = SuperLU_timer_(); gpuEventRecord(A_gpu->ePCIeH2D[k0], FunCallStream); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub_IndirectJ3, A_gpu->scubufs[streamId].usub_IndirectJ3_host, ncols * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_L_buff, Remain_L_buff, Remain_lbuf_send_size * sizeof(float), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].bigU, bigU, bigu_send_size * sizeof(float), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_info, Remain_info, RemainBlk * sizeof(Remain_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Ublock_info, Ublock_info, mcb * sizeof(Ublock_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].lsub, lsub, lsub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub, usub, usub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); A_gpu->tHost_PCIeH2D += SuperLU_timer_() - tTmp; A_gpu->cPCIeH2D += Remain_lbuf_send_size * sizeof(float) + bigu_send_size * sizeof(float) + RemainBlk * sizeof(Remain_info_t) + mcb * sizeof(Ublock_info_t) + lsub_len * sizeof(int_t) + usub_len * sizeof(int_t); float alpha = 1.0, beta = 0.0; int_t ii_st = 0; int_t ii_end = 0; int_t maxGemmBlockDim = (int) sqrt(buffer_size); // int_t maxGemmBlockDim = 8000; /* Organize GEMM by blocks of [ii_st : ii_end, jj_st : jj_end] that fits in the buffer_size */ while (ii_end < RemainBlk) { ii_st = ii_end; ii_end = RemainBlk; int_t nrow_max = maxGemmBlockDim; // nrow_max = Rnbrow; int_t remaining_rows = (ii_st == 0) ? Rnbrow : Rnbrow - Remain_info[ii_st - 1].FullRow; nrow_max = (remaining_rows / nrow_max) > 0 ? remaining_rows / CEILING(remaining_rows, nrow_max) : nrow_max; int_t ResRow = (ii_st == 0) ? 0 : Remain_info[ii_st - 1].FullRow; for (int_t i = ii_st; i < RemainBlk - 1; ++i) { if ( Remain_info[i + 1].FullRow > ResRow + nrow_max) { ii_end = i; break; /* row dimension reaches nrow_max */ } } int_t nrows; /* actual row dimension for GEMM */ int_t st_row; if (ii_st > 0) { nrows = Remain_info[ii_end - 1].FullRow - Remain_info[ii_st - 1].FullRow; st_row = Remain_info[ii_st - 1].FullRow; } else { nrows = Remain_info[ii_end - 1].FullRow; st_row = 0; } int jj_st = jj_cpu; int jj_end = jj_cpu; while (jj_end < nub && nrows > 0 ) { int_t remaining_cols = (jj_st == jj_cpu) ? 
ncols : ncols - Ublock_info[jj_st - 1].full_u_cols; if ( remaining_cols * nrows < buffer_size) { jj_st = jj_end; jj_end = nub; } else /* C matrix cannot fit in buffer, need to break into pieces */ { int_t ncol_max = buffer_size / nrows; /** Must revisit **/ ncol_max = SUPERLU_MIN(ncol_max, maxGemmBlockDim); ncol_max = (remaining_cols / ncol_max) > 0 ? remaining_cols / CEILING(remaining_cols, ncol_max) : ncol_max; jj_st = jj_end; jj_end = nub; int_t ResCol = (jj_st == 0) ? 0 : Ublock_info[jj_st - 1].full_u_cols; for (int_t j = jj_st; j < nub - 1; ++j) { if (Ublock_info[j + 1].full_u_cols > ResCol + ncol_max) { jj_end = j; break; } } } /* end-if-else */ int ncols; int st_col; if (jj_st > 0) { ncols = Ublock_info[jj_end - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols; st_col = Ublock_info[jj_st - 1].full_u_cols; if (ncols == 0) exit(0); } else { ncols = Ublock_info[jj_end - 1].full_u_cols; st_col = 0; } /* none of the matrix dimension is zero. */ if (nrows > 0 && ldu > 0 && ncols > 0) { if (nrows * ncols > buffer_size) { printf("!! Matrix size %lld x %lld exceeds buffer_size %lld\n", nrows, ncols, buffer_size); fflush(stdout); } assert(nrows * ncols <= buffer_size); gpublasSetStream(gpublas_handle0, FunCallStream); gpuEventRecord(A_gpu->GemmStart[k0], FunCallStream); gpublasSgemm(gpublas_handle0, GPUBLAS_OP_N, GPUBLAS_OP_N, nrows, ncols, ldu, &alpha, &A_gpu->scubufs[streamId].Remain_L_buff[(knsupc - ldu) * Rnbrow + st_row], Rnbrow, &A_gpu->scubufs[streamId].bigU[st_col * ldu], ldu, &beta, A_gpu->scubufs[streamId].bigV, nrows); // #define SCATTER_OPT #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchronous #endif gpuEventRecord(A_gpu->GemmEnd[k0], FunCallStream); A_gpu->GemmFLOPCounter += 2.0 * (double) nrows * ncols * ldu; /* * Scattering the output */ // dim3 dimBlock(THREAD_BLOCK_SIZE); // 1d thread dim3 dimBlock(ldt); // 1d thread dim3 dimGrid(ii_end - ii_st, jj_end - jj_st); hipLaunchKernelGGL(( Scatter_GPU_kernel) , dim3(dimGrid), dim3(dimBlock), (4*ldt + 2*SCATTER_THREAD_BLOCK_SIZE)*sizeof(int), FunCallStream, streamId, ii_st, ii_end, jj_st, jj_end, klst, 0, nrows, ldt, npcol, nprow, dA_gpu); #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchrnous #endif gpuEventRecord(A_gpu->ScatterEnd[k0], FunCallStream); A_gpu->ScatterMOPCounter += 3.0 * (double) nrows * ncols; } /* endif ... none of the matrix dimension is zero. 
*/ } /* end while jj_end < nub */ } /* end while (ii_end < RemainBlk) */ return 0; } /* end sSchurCompUpdate_GPU */ static void print_occupancy() { int blockSize; // The launch configurator returned block size int minGridSize; /* The minimum grid size needed to achieve the best potential occupancy */ gpuOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, Scatter_GPU_kernel, 0, 0); printf("Occupancy: MinGridSize %d blocksize %d \n", minGridSize, blockSize); } static void printDevProp(gpuDeviceProp devProp) { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); printf("pciBusID: %d\n", devProp.pciBusID); printf("pciDeviceID: %d\n", devProp.pciDeviceID); printf("GPU Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total free memory: %zu\n", mfree); printf("Clock rate: %d\n", devProp.clockRate); return; } static size_t get_acc_memory () { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); #if 0 printf("Total memory %zu & free memory %zu\n", mtotal, mfree); #endif return (size_t) (0.9 * (double) mfree) / get_mpi_process_per_gpu (); } int sfree_LUstruct_gpu (sLUstruct_gpu_t * A_gpu) { /* Free the L data structure on GPU */ checkGPU(gpuFree(A_gpu->LrowindVec)); checkGPU(gpuFree(A_gpu->LrowindPtr)); checkGPU(gpuFree(A_gpu->LnzvalVec)); checkGPU(gpuFree(A_gpu->LnzvalPtr)); free(A_gpu->LnzvalPtr_host); /*freeing the pinned memory*/ int_t streamId = 0; checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Ublock_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_L_buff_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].bigU_host)); checkGPU(gpuFreeHost(A_gpu->acc_L_buff)); checkGPU(gpuFreeHost(A_gpu->acc_U_buff)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].lsub_buf)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].usub_buf)); SUPERLU_FREE(A_gpu->isOffloaded); // changed to SUPERLU_MALLOC/SUPERLU_FREE SUPERLU_FREE(A_gpu->GemmStart); SUPERLU_FREE(A_gpu->GemmEnd); SUPERLU_FREE(A_gpu->ScatterEnd); SUPERLU_FREE(A_gpu->ePCIeH2D); SUPERLU_FREE(A_gpu->ePCIeD2H_Start); SUPERLU_FREE(A_gpu->ePCIeD2H_End); /* Free the U data structure on GPU */ checkGPU(gpuFree(A_gpu->UrowindVec)); checkGPU(gpuFree(A_gpu->UrowindPtr)); //free(A_gpu->UrowindPtr_host); // Sherry: this is NOT allocated checkGPU(gpuFree(A_gpu->UnzvalVec)); checkGPU(gpuFree(A_gpu->UnzvalPtr)); checkGPU(gpuFree(A_gpu->grid)); /* Free the Schur complement structure on GPU */ checkGPU(gpuFree(A_gpu->scubufs[streamId].bigV)); checkGPU(gpuFree(A_gpu->scubufs[streamId].bigU)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_L_buff)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Ublock_info)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_info)); // checkGPU(gpuFree(A_gpu->indirect)); // checkGPU(gpuFree(A_gpu->indirect2)); checkGPU(gpuFree(A_gpu->xsup)); checkGPU(gpuFree(A_gpu->scubufs[streamId].lsub)); checkGPU(gpuFree(A_gpu->scubufs[streamId].usub)); checkGPU(gpuFree(A_gpu->local_l_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_l_blk_infoPtr)); checkGPU(gpuFree(A_gpu->jib_lookupVec)); checkGPU(gpuFree(A_gpu->jib_lookupPtr)); checkGPU(gpuFree(A_gpu->local_u_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_u_blk_infoPtr)); checkGPU(gpuFree(A_gpu->ijb_lookupVec)); checkGPU(gpuFree(A_gpu->ijb_lookupPtr)); return 0; } void sPrint_matrix( char *desc, int_t m, int_t n, float * dA, int_t lda ) { float *cPtr = (float *) malloc(sizeof(float) * lda * n); checkGPU(gpuMemcpy( cPtr, dA, lda * n * sizeof(float), 
gpuMemcpyDeviceToHost)) ; int_t i, j; printf( "\n %s\n", desc ); for ( i = 0; i < m; i++ ) { for ( j = 0; j < n; j++ ) printf( " %.3e", cPtr[i + j * lda] ); printf( "\n" ); } free(cPtr); } void sprintGPUStats(sLUstruct_gpu_t * A_gpu) { double tGemm = 0; double tScatter = 0; double tPCIeH2D = 0; double tPCIeD2H = 0; for (int_t i = 0; i < A_gpu->nsupers; ++i) { float milliseconds = 0; if (A_gpu->isOffloaded[i]) { gpuEventElapsedTime(&milliseconds, A_gpu->ePCIeH2D[i], A_gpu->GemmStart[i]); tPCIeH2D += 1e-3 * (double) milliseconds; milliseconds = 0; gpuEventElapsedTime(&milliseconds, A_gpu->GemmStart[i], A_gpu->GemmEnd[i]); tGemm += 1e-3 * (double) milliseconds; milliseconds = 0; gpuEventElapsedTime(&milliseconds, A_gpu->GemmEnd[i], A_gpu->ScatterEnd[i]); tScatter += 1e-3 * (double) milliseconds; } milliseconds = 0; gpuEventElapsedTime(&milliseconds, A_gpu->ePCIeD2H_Start[i], A_gpu->ePCIeD2H_End[i]); tPCIeD2H += 1e-3 * (double) milliseconds; } printf("GPU: Flops offloaded %.3e Time spent %lf Flop rate %lf GF/sec \n", A_gpu->GemmFLOPCounter, tGemm, 1e-9 * A_gpu->GemmFLOPCounter / tGemm ); printf("GPU: Mop offloaded %.3e Time spent %lf Bandwidth %lf GByte/sec \n", A_gpu->ScatterMOPCounter, tScatter, 8e-9 * A_gpu->ScatterMOPCounter / tScatter ); printf("PCIe Data Transfer H2D:\n\tData Sent %.3e(GB)\n\tTime observed from CPU %lf\n\tActual time spent %lf\n\tBandwidth %lf GByte/sec \n", 1e-9 * A_gpu->cPCIeH2D, A_gpu->tHost_PCIeH2D, tPCIeH2D, 1e-9 * A_gpu->cPCIeH2D / tPCIeH2D ); printf("PCIe Data Transfer D2H:\n\tData Sent %.3e(GB)\n\tTime observed from CPU %lf\n\tActual time spent %lf\n\tBandwidth %lf GByte/sec \n", 1e-9 * A_gpu->cPCIeD2H, A_gpu->tHost_PCIeD2H, tPCIeD2H, 1e-9 * A_gpu->cPCIeD2H / tPCIeD2H ); fflush(stdout); } /* end printGPUStats */ /* Initialize the GPU side of the data structure. */ int sinitSluGPU3D_t( ssluGPU_t *sluGPU, // LU structures on GPU, see slustruct_gpu.h sLUstruct_t *LUstruct, gridinfo3d_t * grid3d, int_t* perm_c_supno, int_t n, int_t buffer_size, /* read from env variable MAX_BUFFER_SIZE */ int_t bigu_size, int_t ldt /* NSUP read from sp_ienv(3) */ ) { checkGPUErrors(gpuDeviceReset ()) ; Glu_persist_t *Glu_persist = LUstruct->Glu_persist; sLocalLU_t *Llu = LUstruct->Llu; int* isNodeInMyGrid = sluGPU->isNodeInMyGrid; sluGPU->nGPUStreams = getnGPUStreams(); int SCATTER_THREAD_BLOCK_SIZE = ldt; if(getenv("SCATTER_THREAD_BLOCK_SIZE")) { int stbs = atoi(getenv("SCATTER_THREAD_BLOCK_SIZE")); if(stbs>=ldt) { SCATTER_THREAD_BLOCK_SIZE = stbs; } } if (grid3d->iam == 0) { printf("dinitSluGPU3D_t: Using hardware acceleration, with %d gpu streams \n", sluGPU->nGPUStreams); fflush(stdout); printf("dinitSluGPU3D_t: Using %d threads per block for scatter \n", SCATTER_THREAD_BLOCK_SIZE); if ( MAX_SUPER_SIZE < ldt ) { ABORT("MAX_SUPER_SIZE smaller than requested NSUP"); } } gpuStreamCreate(&(sluGPU->CopyStream)); for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { gpuStreamCreate(&(sluGPU->funCallStreams[streamId])); gpublasCreate(&(sluGPU->gpublasHandles[streamId])); sluGPU->lastOffloadStream[streamId] = -1; } sluGPU->A_gpu = (sLUstruct_gpu_t *) malloc (sizeof(sLUstruct_gpu_t)); sluGPU->A_gpu->perm_c_supno = perm_c_supno; /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. 
*/ sCopyLUToGPU3D ( isNodeInMyGrid, Llu, /* referred to as A_host */ sluGPU, Glu_persist, n, grid3d, buffer_size, bigu_size, ldt ); return 0; } /* end sinitSluGPU3D_t */ int sinitD2Hreduce( int next_k, d2Hreduce_t* d2Hred, int last_flag, HyP_t* HyP, ssluGPU_t *sluGPU, gridinfo_t *grid, sLUstruct_t *LUstruct, SCT_t* SCT ) { Glu_persist_t *Glu_persist = LUstruct->Glu_persist; sLocalLU_t *Llu = LUstruct->Llu; int_t* xsup = Glu_persist->xsup; int_t iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; // int_t next_col = SUPERLU_MIN (k0 + num_look_aheads + 1, nsupers - 1); // int_t next_k = perm_c_supno[next_col]; /* global block number for next colum*/ int_t mkcol, mkrow; int_t kljb = LBj( next_k, grid ); /*local block number for next block*/ int_t kijb = LBi( next_k, grid ); /*local block number for next block*/ int_t *kindexL ; /*for storing index vectors*/ int_t *kindexU ; mkrow = PROW (next_k, grid); mkcol = PCOL (next_k, grid); int_t ksup_size = SuperSize(next_k); int_t copyL_kljb = 0; int_t copyU_kljb = 0; int_t l_copy_len = 0; int_t u_copy_len = 0; if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL && last_flag) { if (HyP->Lblock_dirty_bit[kljb] > -1) { copyL_kljb = 1; int_t lastk0 = HyP->Lblock_dirty_bit[kljb]; int_t streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } kindexL = Lrowind_bc_ptr[kljb]; l_copy_len = kindexL[1] * ksup_size; } if ( mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL && last_flag ) { if (HyP->Ublock_dirty_bit[kijb] > -1) { copyU_kljb = 1; int_t lastk0 = HyP->Ublock_dirty_bit[kijb]; int_t streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } // copyU_kljb = HyP->Ublock_dirty_bit[kijb]>-1? 
1: 0; kindexU = Ufstnz_br_ptr[kijb]; u_copy_len = kindexU[1]; } // wait for streams if they have not been finished // d2Hred->next_col = next_col; d2Hred->next_k = next_k; d2Hred->kljb = kljb; d2Hred->kijb = kijb; d2Hred->copyL_kljb = copyL_kljb; d2Hred->copyU_kljb = copyU_kljb; d2Hred->l_copy_len = l_copy_len; d2Hred->u_copy_len = u_copy_len; d2Hred->kindexU = kindexU; d2Hred->kindexL = kindexL; d2Hred->mkrow = mkrow; d2Hred->mkcol = mkcol; d2Hred->ksup_size = ksup_size; return 0; } /* sinitD2Hreduce */ int sreduceGPUlu( int last_flag, d2Hreduce_t* d2Hred, ssluGPU_t *sluGPU, SCT_t *SCT, gridinfo_t *grid, sLUstruct_t *LUstruct ) { sLocalLU_t *Llu = LUstruct->Llu; int iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; float** Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; float** Unzval_br_ptr = Llu->Unzval_br_ptr; gpuStream_t CopyStream; sLUstruct_gpu_t *A_gpu; A_gpu = sluGPU->A_gpu; CopyStream = sluGPU->CopyStream; int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t mkrow = d2Hred->mkrow; int_t mkcol = d2Hred->mkcol; int_t ksup_size = d2Hred->ksup_size; int_t *kindex; if ((copyL_kljb || copyU_kljb) && last_flag ) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(CopyStream); SCT->PhiWaitTimer_2 += SuperLU_timer_() - ttx; } double tt_start = SuperLU_timer_(); if (last_flag) { if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL ) { kindex = Lrowind_bc_ptr[kljb]; int_t len = kindex[1]; if (copyL_kljb) { float *nzval_host; nzval_host = Lnzval_bc_ptr[kljb]; int_t llen = ksup_size * len; float alpha = 1; superlu_saxpy (llen, alpha, A_gpu->acc_L_buff, 1, nzval_host, 1); } } } if (last_flag) { if (mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL ) { kindex = Ufstnz_br_ptr[kijb]; int_t len = kindex[1]; if (copyU_kljb) { float *nzval_host; nzval_host = Unzval_br_ptr[kijb]; float alpha = 1; superlu_saxpy (len, alpha, A_gpu->acc_U_buff, 1, nzval_host, 1); } } } double tt_end = SuperLU_timer_(); SCT->AssemblyTimer += tt_end - tt_start; return 0; } /* sreduceGPUlu */ int swaitGPUscu(int streamId, ssluGPU_t *sluGPU, SCT_t *SCT) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; return 0; } int ssendLUpanelGPU2HOST( int_t k0, d2Hreduce_t* d2Hred, ssluGPU_t *sluGPU ) { int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t l_copy_len = d2Hred->l_copy_len; int_t u_copy_len = d2Hred->u_copy_len; gpuStream_t CopyStream = sluGPU->CopyStream;; sLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; double tty = SuperLU_timer_(); gpuEventRecord(A_gpu->ePCIeD2H_Start[k0], CopyStream); if (copyL_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_L_buff, &A_gpu->LnzvalVec[A_gpu->LnzvalPtr_host[kljb]], l_copy_len * sizeof(float), gpuMemcpyDeviceToHost, CopyStream ) ); if (copyU_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_U_buff, &A_gpu->UnzvalVec[A_gpu->UnzvalPtr_host[kijb]], u_copy_len * sizeof(float), gpuMemcpyDeviceToHost, CopyStream ) ); gpuEventRecord(A_gpu->ePCIeD2H_End[k0], CopyStream); A_gpu->tHost_PCIeD2H += SuperLU_timer_() - tty; A_gpu->cPCIeD2H += u_copy_len * sizeof(float) + l_copy_len * sizeof(float); return 0; } /* Copy L and U panel data structures from host to the host part of the data structures in A_gpu. GPU is not involved in this routine. 
*/ int ssendSCUdataHost2GPU( int_t streamId, int_t* lsub, int_t* usub, float* bigU, int_t bigu_send_size, int_t Remain_lbuf_send_size, ssluGPU_t *sluGPU, HyP_t* HyP ) { //{printf("....[enter] ssendSCUdataHost2GPU, bigu_send_size %d\n", bigu_send_size); fflush(stdout);} int_t usub_len = usub[2]; int_t lsub_len = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR; //{printf("....[2] in ssendSCUdataHost2GPU, lsub_len %d\n", lsub_len); fflush(stdout);} sLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; memcpy(A_gpu->scubufs[streamId].lsub_buf, lsub, sizeof(int_t)*lsub_len); memcpy(A_gpu->scubufs[streamId].usub_buf, usub, sizeof(int_t)*usub_len); memcpy(A_gpu->scubufs[streamId].Remain_info_host, HyP->Remain_info, sizeof(Remain_info_t)*HyP->RemainBlk); memcpy(A_gpu->scubufs[streamId].Ublock_info_host, HyP->Ublock_info_Phi, sizeof(Ublock_info_t)*HyP->num_u_blks_Phi); memcpy(A_gpu->scubufs[streamId].Remain_L_buff_host, HyP->Remain_L_buff, sizeof(float)*Remain_lbuf_send_size); memcpy(A_gpu->scubufs[streamId].bigU_host, bigU, sizeof(float)*bigu_send_size); return 0; } /* Sherry: not used ?*/ #if 0 int freeSluGPU(ssluGPU_t *sluGPU) { return 0; } #endif /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. After factorization, the GPU LU structure should be freed by calling sfree_LUsstruct_gpu(). */ void sCopyLUToGPU3D ( int* isNodeInMyGrid, sLocalLU_t *A_host, /* distributed LU structure on host */ ssluGPU_t *sluGPU, /* hold LU structure on GPU */ Glu_persist_t *Glu_persist, int_t n, gridinfo3d_t *grid3d, int_t buffer_size, /* bigV size on GPU for Schur complement update */ int_t bigu_size, int_t ldt ) { gridinfo_t* grid = &(grid3d->grid2d); sLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; sLUstruct_gpu_t **dA_gpu = &(sluGPU->dA_gpu); #if ( PRNTlevel>=1 ) if ( grid3d->iam == 0 ) print_occupancy(); #endif #ifdef GPU_DEBUG // if ( grid3d->iam == 0 ) { gpuDeviceProp devProp; gpuGetDeviceProperties(&devProp, 0); printDevProp(devProp); } #endif int_t *xsup ; xsup = Glu_persist->xsup; int iam = grid->iam; int nsupers = Glu_persist->supno[n - 1] + 1; int_t Pc = grid->npcol; int_t Pr = grid->nprow; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t mrb = (nsupers + Pr - 1) / Pr; int_t mcb = (nsupers + Pc - 1) / Pc; int_t remain_l_max = A_host->bufmax[1]; /*copies of scalars for easy access*/ A_gpu->nsupers = nsupers; A_gpu->ScatterMOPCounter = 0; A_gpu->GemmFLOPCounter = 0; A_gpu->cPCIeH2D = 0; A_gpu->cPCIeD2H = 0; A_gpu->tHost_PCIeH2D = 0; A_gpu->tHost_PCIeD2H = 0; /*initializing memory*/ size_t max_gpu_memory = get_acc_memory (); size_t gpu_mem_used = 0; void *tmp_ptr; A_gpu->xsup_host = xsup; int_t nGPUStreams = sluGPU->nGPUStreams; /*pinned memory allocations. 
Paged-locked memory by gpuMallocHost is accessible to the device.*/ for (int streamId = 0; streamId < nGPUStreams; streamId++ ) { void *tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, (n) * sizeof(int_t) )) ; A_gpu->scubufs[streamId].usub_IndirectJ3_host = (int_t*) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, ( n) * sizeof(int_t) )); A_gpu->scubufs[streamId].usub_IndirectJ3 = (int_t*) tmp_ptr; gpu_mem_used += ( n) * sizeof(int_t); checkGPUErrors(gpuMallocHost( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info_host = (Remain_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info_host = (Ublock_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, remain_l_max * sizeof(float) )) ; A_gpu->scubufs[streamId].Remain_L_buff_host = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, bigu_size * sizeof(float) )) ; A_gpu->scubufs[streamId].bigU_host = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(float) * (A_host->bufmax[1]))); A_gpu->acc_L_buff = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(float) * (A_host->bufmax[3]))); A_gpu->acc_U_buff = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[0]))); A_gpu->scubufs[streamId].lsub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[2]))); A_gpu->scubufs[streamId].usub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, remain_l_max * sizeof(float) )) ; A_gpu->scubufs[streamId].Remain_L_buff = (float *) tmp_ptr; gpu_mem_used += remain_l_max * sizeof(float); checkGPUErrors(gpuMalloc( &tmp_ptr, bigu_size * sizeof(float) )) ; A_gpu->scubufs[streamId].bigU = (float *) tmp_ptr; gpu_mem_used += bigu_size * sizeof(float); checkGPUErrors(gpuMalloc( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info = (Ublock_info_t *) tmp_ptr; gpu_mem_used += mcb * sizeof(Ublock_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info = (Remain_info_t *) tmp_ptr; gpu_mem_used += mrb * sizeof(Remain_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, buffer_size * sizeof(float))) ; A_gpu->scubufs[streamId].bigV = (float *) tmp_ptr; gpu_mem_used += buffer_size * sizeof(float); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[0]*sizeof(int_t))) ; A_gpu->scubufs[streamId].lsub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[0] * sizeof(int_t); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[2]*sizeof(int_t))) ; A_gpu->scubufs[streamId].usub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[2] * sizeof(int_t); } /* endfor streamID ... 
allocate paged-locked memory */ A_gpu->isOffloaded = (int *) SUPERLU_MALLOC (sizeof(int) * nsupers); A_gpu->GemmStart = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->GemmEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ScatterEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ePCIeH2D = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ePCIeD2H_Start = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ePCIeD2H_End = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); for (int i = 0; i < nsupers; ++i) { A_gpu->isOffloaded[i] = 0; checkGPUErrors(gpuEventCreate(&(A_gpu->GemmStart[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->GemmEnd[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ScatterEnd[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeH2D[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeH2D[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeD2H_Start[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeD2H_End[i]))); } /*---- Copy L data structure to GPU ----*/ /*pointers and address of local blocks for easy accessibility */ local_l_blk_info_t *local_l_blk_infoVec; int_t * local_l_blk_infoPtr; local_l_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pc) * sizeof(int_t ) ); /* First pass: count total L blocks */ int_t cum_num_l_blocks = 0; /* total number of L blocks I own */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { /* going through each block column I own */ if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; cum_num_l_blocks += num_l_blocks; } } /*allocating memory*/ local_l_blk_infoVec = (local_l_blk_info_t *) malloc(cum_num_l_blocks * sizeof(local_l_blk_info_t)); /* Second pass: set up the meta-data for the L structure */ cum_num_l_blocks = 0; /*initialzing vectors */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; /* # L blocks in this column */ if (num_l_blocks > 0) { local_l_blk_info_t *local_l_blk_info_i = local_l_blk_infoVec + cum_num_l_blocks; local_l_blk_infoPtr[i] = cum_num_l_blocks; int_t lptrj = BC_HEADER; int_t luptrj = 0; for (int_t j = 0; j < num_l_blocks ; ++j) { int_t ijb = index[lptrj]; local_l_blk_info_i[j].lib = ijb / Pr; local_l_blk_info_i[j].lptrj = lptrj; local_l_blk_info_i[j].luptrj = luptrj; luptrj += index[lptrj + 1]; lptrj += LB_DESCRIPTOR + index[lptrj + 1]; } } cum_num_l_blocks += num_l_blocks; } } /* endfor all block columns */ /* Allocate L memory on GPU, and copy the values from CPU to GPU */ checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_l_blocks * sizeof(local_l_blk_info_t))) ; A_gpu->local_l_blk_infoVec = (local_l_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_l_blocks * sizeof(local_l_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoVec), local_l_blk_infoVec, cum_num_l_blocks * sizeof(local_l_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pc)*sizeof(int_t))) ; A_gpu->local_l_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pc) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoPtr), local_l_blk_infoPtr, CEILING(nsupers, Pc)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /*---- Copy U data structure to GPU ----*/ local_u_blk_info_t *local_u_blk_infoVec; int_t * local_u_blk_infoPtr; 
local_u_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pr) * sizeof(int_t ) ); /* First pass: count total U blocks */ int_t cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; cum_num_u_blocks += num_u_blocks; } } local_u_blk_infoVec = (local_u_blk_info_t *) malloc(cum_num_u_blocks * sizeof(local_u_blk_info_t)); /* Second pass: set up the meta-data for the U structure */ cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; if (num_u_blocks > 0) { local_u_blk_info_t *local_u_blk_info_i = local_u_blk_infoVec + cum_num_u_blocks; local_u_blk_infoPtr[i] = cum_num_u_blocks; int_t iuip_lib, ruip_lib; iuip_lib = BR_HEADER; ruip_lib = 0; for (int_t j = 0; j < num_u_blocks ; ++j) { int_t ijb = index[iuip_lib]; local_u_blk_info_i[j].ljb = ijb / Pc; local_u_blk_info_i[j].iuip = iuip_lib; local_u_blk_info_i[j].ruip = ruip_lib; ruip_lib += index[iuip_lib + 1]; iuip_lib += UB_DESCRIPTOR + SuperSize (ijb); } } cum_num_u_blocks += num_u_blocks; } } checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_u_blocks * sizeof(local_u_blk_info_t))) ; A_gpu->local_u_blk_infoVec = (local_u_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_u_blocks * sizeof(local_u_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoVec), local_u_blk_infoVec, cum_num_u_blocks * sizeof(local_u_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pr)*sizeof(int_t))) ; A_gpu->local_u_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pr) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoPtr), local_u_blk_infoPtr, CEILING(nsupers, Pr)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /* Copy the actual L indices and values */ int_t l_k = CEILING( nsupers, grid->npcol ); /* # of local block columns */ int_t *temp_LrowindPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *temp_LnzvalPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *Lnzval_size = (int_t *) malloc(sizeof(int_t) * l_k); int_t l_ind_len = 0; int_t l_val_len = 0; for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... */ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; temp_LrowindPtr[ljb] = l_ind_len; temp_LnzvalPtr[ljb] = l_val_len; // ### Lnzval_size[ljb] = 0; //### if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; /* Global block number is mycol + ljb*Pc */ int_t nsupc = SuperSize(jb); l_ind_len += len1; l_val_len += len * nsupc; Lnzval_size[ljb] = len * nsupc ; // ### } else { Lnzval_size[ljb] = 0 ; // ### } } } /* endfor jb = 0 ... 
*/ /* Copy the actual U indices and values */ int_t u_k = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ int_t *temp_UrowindPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *temp_UnzvalPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *Unzval_size = (int_t *) malloc(sizeof(int_t) * u_k); int_t u_ind_len = 0; int_t u_val_len = 0; for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; temp_UrowindPtr[lb] = u_ind_len; temp_UnzvalPtr[lb] = u_val_len; Unzval_size[lb] = 0; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len = index_host[1]; int_t len1 = index_host[2]; u_ind_len += len1; u_val_len += len; Unzval_size[lb] = len; } else { Unzval_size[lb] = 0; } } gpu_mem_used += l_ind_len * sizeof(int_t); gpu_mem_used += 2 * l_k * sizeof(int_t); gpu_mem_used += u_ind_len * sizeof(int_t); gpu_mem_used += 2 * u_k * sizeof(int_t); /*left memory shall be divided among the two */ for (int_t i = 0; i < l_k; ++i) { temp_LnzvalPtr[i] = -1; } for (int_t i = 0; i < u_k; ++i) { temp_UnzvalPtr[i] = -1; } /*setting these pointers back */ l_val_len = 0; u_val_len = 0; int_t num_gpu_l_blocks = 0; int_t num_gpu_u_blocks = 0; size_t mem_l_block, mem_u_block; /* Find the trailing matrix size that can fit into GPU memory */ for (int_t i = nsupers - 1; i > -1; --i) { /* ulte se chalte hai eleimination tree */ /* bottom up ordering */ int_t i_sup = A_gpu->perm_c_supno[i]; int_t pc = PCOL( i_sup, grid ); if (isNodeInMyGrid[i_sup] == 1) { if (mycol == pc ) { int_t ljb = LBj(i_sup, grid); mem_l_block = sizeof(float) * Lnzval_size[ljb]; if (gpu_mem_used + mem_l_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_l_block; temp_LnzvalPtr[ljb] = l_val_len; l_val_len += Lnzval_size[ljb]; num_gpu_l_blocks++; A_gpu->first_l_block_gpu = i; } } int_t pr = PROW( i_sup, grid ); if (myrow == pr) { int_t lib = LBi(i_sup, grid); mem_u_block = sizeof(float) * Unzval_size[lib]; if (gpu_mem_used + mem_u_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_u_block; temp_UnzvalPtr[lib] = u_val_len; u_val_len += Unzval_size[lib]; num_gpu_u_blocks++; A_gpu->first_u_block_gpu = i; } } } /* endif */ } /* endfor i .... nsupers */ #if (PRNTlevel>=2) printf("(%d) Number of L blocks in GPU %d, U blocks %d\n", grid3d->iam, num_gpu_l_blocks, num_gpu_u_blocks ); printf("(%d) elimination order of first block in GPU: L block %d, U block %d\n", grid3d->iam, A_gpu->first_l_block_gpu, A_gpu->first_u_block_gpu); printf("(%d) Memory of L %.1f GB, memory for U %.1f GB, Total device memory used %.1f GB, Memory allowed %.1f GB \n", grid3d->iam, l_val_len * sizeof(float) * 1e-9, u_val_len * sizeof(float) * 1e-9, gpu_mem_used * 1e-9, max_gpu_memory * 1e-9); fflush(stdout); #endif /* Assemble index vector on temp */ int_t *indtemp = (int_t *) malloc(sizeof(int_t) * l_ind_len); for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... 
*/ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; memcpy(&indtemp[temp_LrowindPtr[ljb]] , index_host, len1 * sizeof(int_t)) ; } } } checkGPUErrors(gpuMalloc( &tmp_ptr, l_ind_len * sizeof(int_t))) ; A_gpu->LrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindVec), indtemp, l_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_val_len * sizeof(float))); A_gpu->LnzvalVec = (float *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->LnzvalVec), 0, l_val_len * sizeof(float))); checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindPtr), temp_LrowindPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LnzvalPtr), temp_LnzvalPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->LnzvalPtr_host = temp_LnzvalPtr; int_t *indtemp1 = (int_t *) malloc(sizeof(int_t) * u_ind_len); for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len1 = index_host[2]; memcpy(&indtemp1[temp_UrowindPtr[lb]] , index_host, sizeof(int_t)*len1); } } checkGPUErrors(gpuMalloc( &tmp_ptr, u_ind_len * sizeof(int_t))) ; A_gpu->UrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindVec), indtemp1, u_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, u_val_len * sizeof(float))); A_gpu->UnzvalVec = (float *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->UnzvalVec), 0, u_val_len * sizeof(float))); checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindPtr), temp_UrowindPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->UnzvalPtr_host = temp_UnzvalPtr; checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UnzvalPtr), temp_UnzvalPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, (nsupers + 1)*sizeof(int_t))) ; A_gpu->xsup = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->xsup), xsup, (nsupers + 1)*sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, sizeof(sLUstruct_gpu_t))) ; *dA_gpu = (sLUstruct_gpu_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( *dA_gpu, A_gpu, sizeof(sLUstruct_gpu_t), gpuMemcpyHostToDevice)) ; free (temp_LrowindPtr); free (temp_UrowindPtr); free (indtemp1); free (indtemp); } /* end sCopyLUToGPU3D */ int sreduceAllAncestors3d_GPU(int_t ilvl, int_t* myNodeCount, int_t** treePerm, sLUValSubBuf_t*LUvsb, sLUstruct_t* LUstruct, gridinfo3d_t* grid3d, ssluGPU_t *sluGPU, d2Hreduce_t* d2Hred, factStat_t *factStat, HyP_t* HyP, SCT_t* SCT ) { // first synchronize all gpu streams int superlu_acc_offload = HyP->superlu_acc_offload; int_t maxLvl = log2i( (int_t) grid3d->zscp.Np) + 1; int_t myGrid = grid3d->zscp.Iam; gridinfo_t* grid = &(grid3d->grid2d); int_t* gpuLUreduced = factStat->gpuLUreduced; int_t sender; if ((myGrid % (1 << (ilvl + 1))) == 0) { 
sender = myGrid + (1 << ilvl); } else { sender = myGrid; } /*Reduce all the ancestors from the GPU*/ if (myGrid == sender && superlu_acc_offload) { for (int_t streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } for (int_t alvl = ilvl + 1; alvl < maxLvl; ++alvl) { /* code */ // int_t atree = myTreeIdxs[alvl]; int_t nsAncestor = myNodeCount[alvl]; int_t* cAncestorList = treePerm[alvl]; for (int_t node = 0; node < nsAncestor; node++ ) { int_t k = cAncestorList[node]; if (!gpuLUreduced[k]) { sinitD2Hreduce(k, d2Hred, 1, HyP, sluGPU, grid, LUstruct, SCT); int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; double tt_start1 = SuperLU_timer_(); SCT->PhiMemCpyTimer += SuperLU_timer_() - tt_start1; if (copyL_kljb || copyU_kljb) SCT->PhiMemCpyCounter++; ssendLUpanelGPU2HOST(k, d2Hred, sluGPU); /* Reduce the LU panels from GPU */ sreduceGPUlu(1, d2Hred, sluGPU, SCT, grid, LUstruct); gpuLUreduced[k] = 1; } } } } /*if (myGrid == sender)*/ sreduceAllAncestors3d(ilvl, myNodeCount, treePerm, LUvsb, LUstruct, grid3d, SCT ); return 0; } /* sreduceAllAncestors3d_GPU */ void ssyncAllfunCallStreams(ssluGPU_t* sluGPU, SCT_t* SCT) { for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } }
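Scatter_GPU_kernel above carves a single extern __shared__ int s[] buffer into four ldt-sized index arrays (indirect_lptr, indirect2_thread, IndirectJ1, IndirectJ3) followed by scratch space for the incScan prefix sum, which is why sSchurCompUpdate_GPU launches it with (4*ldt + 2*SCATTER_THREAD_BLOCK_SIZE)*sizeof(int) bytes of dynamic shared memory. A small sketch of that carving follows; carve_scatter_smem is a hypothetical helper written for illustration, and the factor of 2*block_size for the scan scratch is taken from the launch expression rather than from incScan's definition, which is not shown here.

// Hypothetical helper mirroring the shared-memory layout used inside
// Scatter_GPU_kernel. ldt is the maximum supernode dimension and
// block_size corresponds to SCATTER_THREAD_BLOCK_SIZE in the host code.
__device__ void carve_scatter_smem(int *s, int ldt,
                                   int **indirect_lptr, int **indirect2_thread,
                                   int **IndirectJ1, int **IndirectJ3,
                                   int **pfxStorage)
{
    *indirect_lptr    = s;                        // ldt ints, row-wise map
    *indirect2_thread = *indirect_lptr + ldt;     // ldt ints, row-wise map
    *IndirectJ1       = *indirect2_thread + ldt;  // ldt ints, column-wise
    *IndirectJ3       = *IndirectJ1 + ldt;        // ldt ints, column-wise
    *pfxStorage       = *IndirectJ3 + ldt;        // scan scratch (2*block_size ints assumed)
}

// Host side must therefore pass (4*ldt + 2*block_size) * sizeof(int)
// as the dynamic shared-memory size of the launch.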
ef691f65142406b072e1a9b13f56df6008e2c507.cu
/*! @file * \brief Descriptions and declarations for structures used in GPU * * <pre> * -- Distributed SuperLU routine (version 7.2) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley, * Georgia Institute of Technology, Oak Ridge National Laboratory * * Last update: November 14, 2021 remove dependence on CUB/scan * </pre> */ //#define GPU_DEBUG #include "superlu_defs.h" #undef Reduce //#include <thrust/system/gpu/detail/cub/cub.cuh> #include "slustruct_gpu.h" #ifdef HAVE_CUDA #include "superlu_gpu_utils.cu" #elif defined(HAVE_HIP) #include "superlu_gpu_utils.hip.cpp" #endif //extern "C" { // void cblas_daxpy(const int N, const double alpha, const double *X, // const int incX, double *Y, const int incY); //} // gpublasStatus_t checkGPUblas(gpublasStatus_t result) // { // #if defined(DEBUG) || defined(_DEBUG) // if (result != GPUBLAS_STATUS_SUCCESS) // { // fprintf(stderr, "CUDA Blas Runtime Error: %s\n", gpublasGetErrorString(result)); // assert(result == GPUBLAS_STATUS_SUCCESS); // } // #endif // return result; // } // #define UNIT_STRIDE #if 0 ////////// this routine is not used anymore __device__ inline void device_scatter_l (int_t thread_id, int_t nsupc, int_t temp_nbrow, int_t *usub, int_t iukp, int_t klst, float *nzval, int_t ldv, float *tempv, int_t nbrow, // int_t *indirect2_thread int *indirect2_thread ) { int_t segsize, jj; for (jj = 0; jj < nsupc; ++jj) { segsize = klst - usub[iukp + jj]; if (segsize) { if (thread_id < temp_nbrow) { #ifndef UNIT_STRIDE nzval[indirect2_thread[thread_id]] -= tempv[thread_id]; #else nzval[thread_id] -= tempv[thread_id]; /*making access unit strided*/ #endif } tempv += nbrow; } nzval += ldv; } } #endif ///////////// not used //#define THREAD_BLOCK_SIZE 256 /* Sherry: was 192. should be <= MAX_SUPER_SIZE */ __device__ inline void sdevice_scatter_l_2D (int thread_id, int nsupc, int temp_nbrow, int_t *usub, int iukp, int_t klst, float *nzval, int ldv, const float *tempv, int nbrow, int *indirect2_thread, int nnz_cols, int ColPerBlock, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #define UNROLL_ITER 8 #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = ldv * IndirectJ3[col] + indirect2_thread[thread_id_x]; nzval[i] -= tempv[nbrow * col + thread_id_x]; } } } /* Sherry: this routine is not used */ #if 0 ////////////////////////////////////////////// __global__ void cub_scan_test(void) { int thread_id = threadIdx.x; typedef cub::BlockScan<int, MAX_SUPER_SIZE > BlockScan; /*1D int data type*/ __shared__ typename BlockScan::TempStorage temp_storage; /*storage temp*/ __shared__ int IndirectJ1[MAX_SUPER_SIZE]; __shared__ int IndirectJ2[MAX_SUPER_SIZE]; if (thread_id < MAX_SUPER_SIZE) { IndirectJ1[thread_id] = (thread_id + 1) % 2; } __syncthreads(); if (thread_id < MAX_SUPER_SIZE) BlockScan(temp_storage).InclusiveSum (IndirectJ1[thread_id], IndirectJ2[thread_id]); if (thread_id < MAX_SUPER_SIZE) printf("%d %d\n", thread_id, IndirectJ2[thread_id]); } #endif /////////////////////////////////// not used __device__ inline void device_scatter_u_2D (int thread_id, int temp_nbrow, int nsupc, float * ucol, int_t * usub, int iukp, int_t ilst, int_t klst, int_t * index, int iuip_lib, float * tempv, int nbrow, int *indirect, int nnz_cols, int ColPerBlock, int *IndirectJ1, int *IndirectJ3 ) { int i; if ( thread_id < temp_nbrow * ColPerBlock ) { /* 1D threads are logically arranged in 2D shape. 
*/ int thread_id_x = thread_id % temp_nbrow; int thread_id_y = thread_id / temp_nbrow; #pragma unroll 4 for (int col = thread_id_y; col < nnz_cols ; col += ColPerBlock) { i = IndirectJ1[IndirectJ3[col]]-ilst + indirect[thread_id_x]; ucol[i] -= tempv[nbrow * col + thread_id_x]; } } } __global__ void Scatter_GPU_kernel( int_t streamId, int_t ii_st, int_t ii_end, int_t jj_st, int_t jj_end, /* defines rectangular Schur block to be scatter */ int_t klst, int_t jj0, /* 0 on entry */ int_t nrows, int_t ldt, int_t npcol, int_t nprow, sLUstruct_gpu_t * A_gpu) { /* initializing pointers */ int_t *xsup = A_gpu->xsup; int_t *UrowindPtr = A_gpu->UrowindPtr; int_t *UrowindVec = A_gpu->UrowindVec; int_t *UnzvalPtr = A_gpu->UnzvalPtr; float *UnzvalVec = A_gpu->UnzvalVec; int_t *LrowindPtr = A_gpu->LrowindPtr; int_t *LrowindVec = A_gpu->LrowindVec; int_t *LnzvalPtr = A_gpu->LnzvalPtr; float *LnzvalVec = A_gpu->LnzvalVec; float *bigV = A_gpu->scubufs[streamId].bigV; local_l_blk_info_t *local_l_blk_infoVec = A_gpu->local_l_blk_infoVec; local_u_blk_info_t *local_u_blk_infoVec = A_gpu->local_u_blk_infoVec; int_t *local_l_blk_infoPtr = A_gpu->local_l_blk_infoPtr; int_t *local_u_blk_infoPtr = A_gpu->local_u_blk_infoPtr; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info; int_t *lsub = A_gpu->scubufs[streamId].lsub; int_t *usub = A_gpu->scubufs[streamId].usub; /* thread block assignment: this thread block is assigned to block (lb, j) in 2D grid */ int lb = blockIdx.x + ii_st; int j = blockIdx.y + jj_st; extern __shared__ int s[]; int* indirect_lptr = s; /* row-wise */ int* indirect2_thread= (int*) &indirect_lptr[ldt]; /* row-wise */ int* IndirectJ1= (int*) &indirect2_thread[ldt]; /* column-wise */ int* IndirectJ3= (int*) &IndirectJ1[ldt]; /* column-wise */ //int THREAD_BLOCK_SIZE =ldt; int* pfxStorage = (int*) &IndirectJ3[ldt]; int thread_id = threadIdx.x; int iukp = Ublock_info[j].iukp; int jb = Ublock_info[j].jb; int nsupc = SuperSize (jb); int ljb = jb / npcol; float *tempv1; if (jj_st == jj0) { tempv1 = (j == jj_st) ? bigV : bigV + Ublock_info[j - 1].full_u_cols * nrows; } else { tempv1 = (j == jj_st) ? bigV : bigV + (Ublock_info[j - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols) * nrows; } /* # of nonzero columns in block j */ int nnz_cols = (j == 0) ? Ublock_info[j].full_u_cols : (Ublock_info[j].full_u_cols - Ublock_info[j - 1].full_u_cols); int cum_ncol = (j == 0) ? 0 : Ublock_info[j - 1].full_u_cols; int lptr = Remain_info[lb].lptr; int ib = Remain_info[lb].ib; int temp_nbrow = lsub[lptr + 1]; /* number of rows in the current L block */ lptr += LB_DESCRIPTOR; int_t cum_nrow; if (ii_st == 0) { cum_nrow = (lb == 0 ? 0 : Remain_info[lb - 1].FullRow); } else { cum_nrow = (lb == 0 ? 
0 : Remain_info[lb - 1].FullRow - Remain_info[ii_st - 1].FullRow); } tempv1 += cum_nrow; if (ib < jb) /*scatter U code */ { int ilst = FstBlockC (ib + 1); int lib = ib / nprow; /* local index of row block ib */ int_t *index = &UrowindVec[UrowindPtr[lib]]; int num_u_blocks = index[0]; int ljb = (jb) / npcol; /* local index of column block jb */ /* Each thread is responsible for one block column */ __shared__ int ljb_ind; /*do a search ljb_ind at local row lib*/ int blks_per_threads = CEILING(num_u_blocks, blockDim.x); // printf("blockDim.x =%d \n", blockDim.x); for (int i = 0; i < blks_per_threads; ++i) /* each thread is assigned a chunk of consecutive U blocks to search */ { /* only one thread finds the block index matching ljb */ if (thread_id * blks_per_threads + i < num_u_blocks && local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + thread_id * blks_per_threads + i ].ljb == ljb) { ljb_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int iuip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].iuip; int ruip_lib = local_u_blk_infoVec[ local_u_blk_infoPtr[lib] + ljb_ind].ruip; iuip_lib += UB_DESCRIPTOR; float *Unzval_lib = &UnzvalVec[UnzvalPtr[lib]]; float *ucol = &Unzval_lib[ruip_lib]; if (thread_id < temp_nbrow) /* row-wise */ { /* cyclically map each thread to a row */ indirect_lptr[thread_id] = (int) lsub[lptr + thread_id]; } /* column-wise: each thread is assigned one column */ if (thread_id < nnz_cols) IndirectJ3[thread_id] = A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ __syncthreads(); /* threads are divided into multiple columns */ int ColPerBlock = blockDim.x / temp_nbrow; // if (thread_id < blockDim.x) // IndirectJ1[thread_id] = 0; if (thread_id < ldt) IndirectJ1[thread_id] = 0; if (thread_id < blockDim.x) { if (thread_id < nsupc) { /* fstnz subscript of each column in the block */ IndirectJ1[thread_id] = -index[iuip_lib + thread_id] + ilst; } } /* perform an inclusive block-wide prefix sum among all threads */ __syncthreads(); incScan(IndirectJ1, pfxStorage, nsupc); __syncthreads(); device_scatter_u_2D ( thread_id, temp_nbrow, nsupc, ucol, usub, iukp, ilst, klst, index, iuip_lib, tempv1, nrows, indirect_lptr, nnz_cols, ColPerBlock, IndirectJ1, IndirectJ3 ); } else /* ib >= jb, scatter L code */ { int rel; float *nzval; int_t *index = &LrowindVec[LrowindPtr[ljb]]; int num_l_blocks = index[0]; int ldv = index[1]; int fnz = FstBlockC (ib); int lib = ib / nprow; __shared__ int lib_ind; /*do a search lib_ind for lib*/ int blks_per_threads = CEILING(num_l_blocks, blockDim.x); for (int i = 0; i < blks_per_threads; ++i) { if (thread_id * blks_per_threads + i < num_l_blocks && local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + thread_id * blks_per_threads + i ].lib == lib) { lib_ind = thread_id * blks_per_threads + i; } } __syncthreads(); int lptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].lptrj; int luptrj = local_l_blk_infoVec[ local_l_blk_infoPtr[ljb] + lib_ind].luptrj; lptrj += LB_DESCRIPTOR; int dest_nbrow = index[lptrj - 1]; if (thread_id < dest_nbrow) { rel = index[lptrj + thread_id] - fnz; indirect_lptr[rel] = thread_id; } __syncthreads(); /* can be precalculated */ if (thread_id < temp_nbrow) { rel = lsub[lptr + thread_id] - fnz; indirect2_thread[thread_id] = indirect_lptr[rel]; } if (thread_id < nnz_cols) IndirectJ3[thread_id] = (int) A_gpu->scubufs[streamId].usub_IndirectJ3[cum_ncol + thread_id]; __syncthreads(); int ColPerBlock = 
blockDim.x / temp_nbrow; nzval = &LnzvalVec[LnzvalPtr[ljb]] + luptrj; sdevice_scatter_l_2D( thread_id, nsupc, temp_nbrow, usub, iukp, klst, nzval, ldv, tempv1, nrows, indirect2_thread, nnz_cols, ColPerBlock, IndirectJ3); } /* end else ib >= jb */ } /* end Scatter_GPU_kernel */ #define GPU_2D_SCHUDT /* Not used */ int sSchurCompUpdate_GPU( int_t streamId, int_t jj_cpu, /* 0 on entry, pointing to the start of Phi part */ int_t nub, /* jj_cpu on entry, pointing to the end of the Phi part */ int_t klst, int_t knsupc, int_t Rnbrow, int_t RemainBlk, int_t Remain_lbuf_send_size, int_t bigu_send_size, int_t ldu, int_t mcb, /* num_u_blks_hi */ int_t buffer_size, int_t lsub_len, int_t usub_len, int_t ldt, int_t k0, ssluGPU_t *sluGPU, gridinfo_t *grid ) { int SCATTER_THREAD_BLOCK_SIZE=512; sLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; sLUstruct_gpu_t * dA_gpu = sluGPU->dA_gpu; int_t nprow = grid->nprow; int_t npcol = grid->npcol; gpuStream_t FunCallStream = sluGPU->funCallStreams[streamId]; gpublasHandle_t gpublas_handle0 = sluGPU->gpublasHandles[streamId]; int_t * lsub = A_gpu->scubufs[streamId].lsub_buf; int_t * usub = A_gpu->scubufs[streamId].usub_buf; Remain_info_t *Remain_info = A_gpu->scubufs[streamId].Remain_info_host; float * Remain_L_buff = A_gpu->scubufs[streamId].Remain_L_buff_host; Ublock_info_t *Ublock_info = A_gpu->scubufs[streamId].Ublock_info_host; float * bigU = A_gpu->scubufs[streamId].bigU_host; A_gpu->isOffloaded[k0] = 1; /* start by sending data to */ int_t *xsup = A_gpu->xsup_host; int_t col_back = (jj_cpu == 0) ? 0 : Ublock_info[jj_cpu - 1].full_u_cols; // if(nub<1) return; int_t ncols = Ublock_info[nub - 1].full_u_cols - col_back; /* Sherry: can get max_super_size from sp_ienv(3) */ int_t indirectJ1[MAX_SUPER_SIZE]; // 0 indicates an empry segment int_t indirectJ2[MAX_SUPER_SIZE]; // # of nonzero segments so far int_t indirectJ3[MAX_SUPER_SIZE]; /* indirectJ3[j] == k means the j-th nonzero segment points to column k in this supernode */ /* calculate usub_indirect */ for (int jj = jj_cpu; jj < nub; ++jj) { int_t iukp = Ublock_info[jj].iukp; int_t jb = Ublock_info[jj].jb; int_t nsupc = SuperSize (jb); int_t addr = (jj == 0) ? 0 : Ublock_info[jj - 1].full_u_cols - col_back; for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ1[kk] = 0; } for (int_t kk = 0; kk < nsupc; ++kk) { indirectJ1[kk] = ((klst - usub[iukp + kk]) == 0) ? 0 : 1; } /*prefix sum - indicates # of nonzero segments up to column kk */ indirectJ2[0] = indirectJ1[0]; for (int_t kk = 1; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { indirectJ2[kk] = indirectJ2[kk - 1] + indirectJ1[kk]; } /* total number of nonzero segments in this supernode */ int nnz_col = indirectJ2[nsupc - 1]; // old: MAX_SUPER_SIZE /* compactation */ for (int_t kk = 0; kk < nsupc; ++kk) // old: MAX_SUPER_SIZE { if (indirectJ1[kk]) /* kk is a nonzero segment */ { /* indirectJ3[j] == kk means the j-th nonzero segment points to column kk in this supernode */ indirectJ3[indirectJ2[kk] - 1] = kk; } } for (int i = 0; i < nnz_col; ++i) { /* addr == total # of full columns before current block jj */ A_gpu->scubufs[streamId].usub_IndirectJ3_host[addr + i] = indirectJ3[i]; } } /* end for jj ... 
calculate usub_indirect */ //printf("sSchurCompUpdate_GPU[3]: jj_cpu %d, nub %d\n", jj_cpu, nub); fflush(stdout); /*sizeof RemainLbuf = Rnbuf*knsupc */ double tTmp = SuperLU_timer_(); gpuEventRecord(A_gpu->ePCIeH2D[k0], FunCallStream); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub_IndirectJ3, A_gpu->scubufs[streamId].usub_IndirectJ3_host, ncols * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_L_buff, Remain_L_buff, Remain_lbuf_send_size * sizeof(float), gpuMemcpyHostToDevice, FunCallStream)) ; checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].bigU, bigU, bigu_send_size * sizeof(float), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Remain_info, Remain_info, RemainBlk * sizeof(Remain_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].Ublock_info, Ublock_info, mcb * sizeof(Ublock_info_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].lsub, lsub, lsub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); checkGPU(gpuMemcpyAsync(A_gpu->scubufs[streamId].usub, usub, usub_len * sizeof(int_t), gpuMemcpyHostToDevice, FunCallStream) ); A_gpu->tHost_PCIeH2D += SuperLU_timer_() - tTmp; A_gpu->cPCIeH2D += Remain_lbuf_send_size * sizeof(float) + bigu_send_size * sizeof(float) + RemainBlk * sizeof(Remain_info_t) + mcb * sizeof(Ublock_info_t) + lsub_len * sizeof(int_t) + usub_len * sizeof(int_t); float alpha = 1.0, beta = 0.0; int_t ii_st = 0; int_t ii_end = 0; int_t maxGemmBlockDim = (int) sqrt(buffer_size); // int_t maxGemmBlockDim = 8000; /* Organize GEMM by blocks of [ii_st : ii_end, jj_st : jj_end] that fits in the buffer_size */ while (ii_end < RemainBlk) { ii_st = ii_end; ii_end = RemainBlk; int_t nrow_max = maxGemmBlockDim; // nrow_max = Rnbrow; int_t remaining_rows = (ii_st == 0) ? Rnbrow : Rnbrow - Remain_info[ii_st - 1].FullRow; nrow_max = (remaining_rows / nrow_max) > 0 ? remaining_rows / CEILING(remaining_rows, nrow_max) : nrow_max; int_t ResRow = (ii_st == 0) ? 0 : Remain_info[ii_st - 1].FullRow; for (int_t i = ii_st; i < RemainBlk - 1; ++i) { if ( Remain_info[i + 1].FullRow > ResRow + nrow_max) { ii_end = i; break; /* row dimension reaches nrow_max */ } } int_t nrows; /* actual row dimension for GEMM */ int_t st_row; if (ii_st > 0) { nrows = Remain_info[ii_end - 1].FullRow - Remain_info[ii_st - 1].FullRow; st_row = Remain_info[ii_st - 1].FullRow; } else { nrows = Remain_info[ii_end - 1].FullRow; st_row = 0; } int jj_st = jj_cpu; int jj_end = jj_cpu; while (jj_end < nub && nrows > 0 ) { int_t remaining_cols = (jj_st == jj_cpu) ? ncols : ncols - Ublock_info[jj_st - 1].full_u_cols; if ( remaining_cols * nrows < buffer_size) { jj_st = jj_end; jj_end = nub; } else /* C matrix cannot fit in buffer, need to break into pieces */ { int_t ncol_max = buffer_size / nrows; /** Must revisit **/ ncol_max = SUPERLU_MIN(ncol_max, maxGemmBlockDim); ncol_max = (remaining_cols / ncol_max) > 0 ? remaining_cols / CEILING(remaining_cols, ncol_max) : ncol_max; jj_st = jj_end; jj_end = nub; int_t ResCol = (jj_st == 0) ? 
0 : Ublock_info[jj_st - 1].full_u_cols; for (int_t j = jj_st; j < nub - 1; ++j) { if (Ublock_info[j + 1].full_u_cols > ResCol + ncol_max) { jj_end = j; break; } } } /* end-if-else */ int ncols; int st_col; if (jj_st > 0) { ncols = Ublock_info[jj_end - 1].full_u_cols - Ublock_info[jj_st - 1].full_u_cols; st_col = Ublock_info[jj_st - 1].full_u_cols; if (ncols == 0) exit(0); } else { ncols = Ublock_info[jj_end - 1].full_u_cols; st_col = 0; } /* none of the matrix dimension is zero. */ if (nrows > 0 && ldu > 0 && ncols > 0) { if (nrows * ncols > buffer_size) { printf("!! Matrix size %lld x %lld exceeds buffer_size %lld\n", nrows, ncols, buffer_size); fflush(stdout); } assert(nrows * ncols <= buffer_size); gpublasSetStream(gpublas_handle0, FunCallStream); gpuEventRecord(A_gpu->GemmStart[k0], FunCallStream); gpublasSgemm(gpublas_handle0, GPUBLAS_OP_N, GPUBLAS_OP_N, nrows, ncols, ldu, &alpha, &A_gpu->scubufs[streamId].Remain_L_buff[(knsupc - ldu) * Rnbrow + st_row], Rnbrow, &A_gpu->scubufs[streamId].bigU[st_col * ldu], ldu, &beta, A_gpu->scubufs[streamId].bigV, nrows); // #define SCATTER_OPT #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchronous #endif gpuEventRecord(A_gpu->GemmEnd[k0], FunCallStream); A_gpu->GemmFLOPCounter += 2.0 * (double) nrows * ncols * ldu; /* * Scattering the output */ // dim3 dimBlock(THREAD_BLOCK_SIZE); // 1d thread dim3 dimBlock(ldt); // 1d thread dim3 dimGrid(ii_end - ii_st, jj_end - jj_st); Scatter_GPU_kernel <<< dimGrid, dimBlock, (4*ldt + 2*SCATTER_THREAD_BLOCK_SIZE)*sizeof(int), FunCallStream>>> (streamId, ii_st, ii_end, jj_st, jj_end, klst, 0, nrows, ldt, npcol, nprow, dA_gpu); #ifdef SCATTER_OPT gpuStreamSynchronize(FunCallStream); #warning this function is synchrnous #endif gpuEventRecord(A_gpu->ScatterEnd[k0], FunCallStream); A_gpu->ScatterMOPCounter += 3.0 * (double) nrows * ncols; } /* endif ... none of the matrix dimension is zero. 
*/ } /* end while jj_end < nub */ } /* end while (ii_end < RemainBlk) */ return 0; } /* end sSchurCompUpdate_GPU */ static void print_occupancy() { int blockSize; // The launch configurator returned block size int minGridSize; /* The minimum grid size needed to achieve the best potential occupancy */ gpuOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, Scatter_GPU_kernel, 0, 0); printf("Occupancy: MinGridSize %d blocksize %d \n", minGridSize, blockSize); } static void printDevProp(gpuDeviceProp devProp) { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); printf("pciBusID: %d\n", devProp.pciBusID); printf("pciDeviceID: %d\n", devProp.pciDeviceID); printf("GPU Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total free memory: %zu\n", mfree); printf("Clock rate: %d\n", devProp.clockRate); return; } static size_t get_acc_memory () { size_t mfree, mtotal; gpuMemGetInfo (&mfree, &mtotal); #if 0 printf("Total memory %zu & free memory %zu\n", mtotal, mfree); #endif return (size_t) (0.9 * (double) mfree) / get_mpi_process_per_gpu (); } int sfree_LUstruct_gpu (sLUstruct_gpu_t * A_gpu) { /* Free the L data structure on GPU */ checkGPU(gpuFree(A_gpu->LrowindVec)); checkGPU(gpuFree(A_gpu->LrowindPtr)); checkGPU(gpuFree(A_gpu->LnzvalVec)); checkGPU(gpuFree(A_gpu->LnzvalPtr)); free(A_gpu->LnzvalPtr_host); /*freeing the pinned memory*/ int_t streamId = 0; checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Ublock_info_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].Remain_L_buff_host)); checkGPU (gpuFreeHost (A_gpu->scubufs[streamId].bigU_host)); checkGPU(gpuFreeHost(A_gpu->acc_L_buff)); checkGPU(gpuFreeHost(A_gpu->acc_U_buff)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].lsub_buf)); checkGPU(gpuFreeHost(A_gpu->scubufs[streamId].usub_buf)); SUPERLU_FREE(A_gpu->isOffloaded); // changed to SUPERLU_MALLOC/SUPERLU_FREE SUPERLU_FREE(A_gpu->GemmStart); SUPERLU_FREE(A_gpu->GemmEnd); SUPERLU_FREE(A_gpu->ScatterEnd); SUPERLU_FREE(A_gpu->ePCIeH2D); SUPERLU_FREE(A_gpu->ePCIeD2H_Start); SUPERLU_FREE(A_gpu->ePCIeD2H_End); /* Free the U data structure on GPU */ checkGPU(gpuFree(A_gpu->UrowindVec)); checkGPU(gpuFree(A_gpu->UrowindPtr)); //free(A_gpu->UrowindPtr_host); // Sherry: this is NOT allocated checkGPU(gpuFree(A_gpu->UnzvalVec)); checkGPU(gpuFree(A_gpu->UnzvalPtr)); checkGPU(gpuFree(A_gpu->grid)); /* Free the Schur complement structure on GPU */ checkGPU(gpuFree(A_gpu->scubufs[streamId].bigV)); checkGPU(gpuFree(A_gpu->scubufs[streamId].bigU)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_L_buff)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Ublock_info)); checkGPU(gpuFree(A_gpu->scubufs[streamId].Remain_info)); // checkGPU(gpuFree(A_gpu->indirect)); // checkGPU(gpuFree(A_gpu->indirect2)); checkGPU(gpuFree(A_gpu->xsup)); checkGPU(gpuFree(A_gpu->scubufs[streamId].lsub)); checkGPU(gpuFree(A_gpu->scubufs[streamId].usub)); checkGPU(gpuFree(A_gpu->local_l_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_l_blk_infoPtr)); checkGPU(gpuFree(A_gpu->jib_lookupVec)); checkGPU(gpuFree(A_gpu->jib_lookupPtr)); checkGPU(gpuFree(A_gpu->local_u_blk_infoVec)); checkGPU(gpuFree(A_gpu->local_u_blk_infoPtr)); checkGPU(gpuFree(A_gpu->ijb_lookupVec)); checkGPU(gpuFree(A_gpu->ijb_lookupPtr)); return 0; } void sPrint_matrix( char *desc, int_t m, int_t n, float * dA, int_t lda ) { float *cPtr = (float *) malloc(sizeof(float) * lda * n); checkGPU(gpuMemcpy( cPtr, dA, lda * n * sizeof(float), 
gpuMemcpyDeviceToHost)) ; int_t i, j; printf( "\n %s\n", desc ); for ( i = 0; i < m; i++ ) { for ( j = 0; j < n; j++ ) printf( " %.3e", cPtr[i + j * lda] ); printf( "\n" ); } free(cPtr); } void sprintGPUStats(sLUstruct_gpu_t * A_gpu) { double tGemm = 0; double tScatter = 0; double tPCIeH2D = 0; double tPCIeD2H = 0; for (int_t i = 0; i < A_gpu->nsupers; ++i) { float milliseconds = 0; if (A_gpu->isOffloaded[i]) { gpuEventElapsedTime(&milliseconds, A_gpu->ePCIeH2D[i], A_gpu->GemmStart[i]); tPCIeH2D += 1e-3 * (double) milliseconds; milliseconds = 0; gpuEventElapsedTime(&milliseconds, A_gpu->GemmStart[i], A_gpu->GemmEnd[i]); tGemm += 1e-3 * (double) milliseconds; milliseconds = 0; gpuEventElapsedTime(&milliseconds, A_gpu->GemmEnd[i], A_gpu->ScatterEnd[i]); tScatter += 1e-3 * (double) milliseconds; } milliseconds = 0; gpuEventElapsedTime(&milliseconds, A_gpu->ePCIeD2H_Start[i], A_gpu->ePCIeD2H_End[i]); tPCIeD2H += 1e-3 * (double) milliseconds; } printf("GPU: Flops offloaded %.3e Time spent %lf Flop rate %lf GF/sec \n", A_gpu->GemmFLOPCounter, tGemm, 1e-9 * A_gpu->GemmFLOPCounter / tGemm ); printf("GPU: Mop offloaded %.3e Time spent %lf Bandwidth %lf GByte/sec \n", A_gpu->ScatterMOPCounter, tScatter, 8e-9 * A_gpu->ScatterMOPCounter / tScatter ); printf("PCIe Data Transfer H2D:\n\tData Sent %.3e(GB)\n\tTime observed from CPU %lf\n\tActual time spent %lf\n\tBandwidth %lf GByte/sec \n", 1e-9 * A_gpu->cPCIeH2D, A_gpu->tHost_PCIeH2D, tPCIeH2D, 1e-9 * A_gpu->cPCIeH2D / tPCIeH2D ); printf("PCIe Data Transfer D2H:\n\tData Sent %.3e(GB)\n\tTime observed from CPU %lf\n\tActual time spent %lf\n\tBandwidth %lf GByte/sec \n", 1e-9 * A_gpu->cPCIeD2H, A_gpu->tHost_PCIeD2H, tPCIeD2H, 1e-9 * A_gpu->cPCIeD2H / tPCIeD2H ); fflush(stdout); } /* end printGPUStats */ /* Initialize the GPU side of the data structure. */ int sinitSluGPU3D_t( ssluGPU_t *sluGPU, // LU structures on GPU, see slustruct_gpu.h sLUstruct_t *LUstruct, gridinfo3d_t * grid3d, int_t* perm_c_supno, int_t n, int_t buffer_size, /* read from env variable MAX_BUFFER_SIZE */ int_t bigu_size, int_t ldt /* NSUP read from sp_ienv(3) */ ) { checkGPUErrors(gpuDeviceReset ()) ; Glu_persist_t *Glu_persist = LUstruct->Glu_persist; sLocalLU_t *Llu = LUstruct->Llu; int* isNodeInMyGrid = sluGPU->isNodeInMyGrid; sluGPU->nGPUStreams = getnGPUStreams(); int SCATTER_THREAD_BLOCK_SIZE = ldt; if(getenv("SCATTER_THREAD_BLOCK_SIZE")) { int stbs = atoi(getenv("SCATTER_THREAD_BLOCK_SIZE")); if(stbs>=ldt) { SCATTER_THREAD_BLOCK_SIZE = stbs; } } if (grid3d->iam == 0) { printf("dinitSluGPU3D_t: Using hardware acceleration, with %d gpu streams \n", sluGPU->nGPUStreams); fflush(stdout); printf("dinitSluGPU3D_t: Using %d threads per block for scatter \n", SCATTER_THREAD_BLOCK_SIZE); if ( MAX_SUPER_SIZE < ldt ) { ABORT("MAX_SUPER_SIZE smaller than requested NSUP"); } } gpuStreamCreate(&(sluGPU->CopyStream)); for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { gpuStreamCreate(&(sluGPU->funCallStreams[streamId])); gpublasCreate(&(sluGPU->gpublasHandles[streamId])); sluGPU->lastOffloadStream[streamId] = -1; } sluGPU->A_gpu = (sLUstruct_gpu_t *) malloc (sizeof(sLUstruct_gpu_t)); sluGPU->A_gpu->perm_c_supno = perm_c_supno; /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. 
*/ sCopyLUToGPU3D ( isNodeInMyGrid, Llu, /* referred to as A_host */ sluGPU, Glu_persist, n, grid3d, buffer_size, bigu_size, ldt ); return 0; } /* end sinitSluGPU3D_t */ int sinitD2Hreduce( int next_k, d2Hreduce_t* d2Hred, int last_flag, HyP_t* HyP, ssluGPU_t *sluGPU, gridinfo_t *grid, sLUstruct_t *LUstruct, SCT_t* SCT ) { Glu_persist_t *Glu_persist = LUstruct->Glu_persist; sLocalLU_t *Llu = LUstruct->Llu; int_t* xsup = Glu_persist->xsup; int_t iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; // int_t next_col = SUPERLU_MIN (k0 + num_look_aheads + 1, nsupers - 1); // int_t next_k = perm_c_supno[next_col]; /* global block number for next colum*/ int_t mkcol, mkrow; int_t kljb = LBj( next_k, grid ); /*local block number for next block*/ int_t kijb = LBi( next_k, grid ); /*local block number for next block*/ int_t *kindexL ; /*for storing index vectors*/ int_t *kindexU ; mkrow = PROW (next_k, grid); mkcol = PCOL (next_k, grid); int_t ksup_size = SuperSize(next_k); int_t copyL_kljb = 0; int_t copyU_kljb = 0; int_t l_copy_len = 0; int_t u_copy_len = 0; if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL && last_flag) { if (HyP->Lblock_dirty_bit[kljb] > -1) { copyL_kljb = 1; int_t lastk0 = HyP->Lblock_dirty_bit[kljb]; int_t streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } kindexL = Lrowind_bc_ptr[kljb]; l_copy_len = kindexL[1] * ksup_size; } if ( mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL && last_flag ) { if (HyP->Ublock_dirty_bit[kijb] > -1) { copyU_kljb = 1; int_t lastk0 = HyP->Ublock_dirty_bit[kijb]; int_t streamIdk0Offload = lastk0 % sluGPU->nGPUStreams; if (sluGPU->lastOffloadStream[streamIdk0Offload] == lastk0 && lastk0 != -1) { // printf("Waiting for Offload =%d to finish StreamId=%d\n", lastk0, streamIdk0Offload); double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamIdk0Offload]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamIdk0Offload] = -1; } } // copyU_kljb = HyP->Ublock_dirty_bit[kijb]>-1? 
1: 0; kindexU = Ufstnz_br_ptr[kijb]; u_copy_len = kindexU[1]; } // wait for streams if they have not been finished // d2Hred->next_col = next_col; d2Hred->next_k = next_k; d2Hred->kljb = kljb; d2Hred->kijb = kijb; d2Hred->copyL_kljb = copyL_kljb; d2Hred->copyU_kljb = copyU_kljb; d2Hred->l_copy_len = l_copy_len; d2Hred->u_copy_len = u_copy_len; d2Hred->kindexU = kindexU; d2Hred->kindexL = kindexL; d2Hred->mkrow = mkrow; d2Hred->mkcol = mkcol; d2Hred->ksup_size = ksup_size; return 0; } /* sinitD2Hreduce */ int sreduceGPUlu( int last_flag, d2Hreduce_t* d2Hred, ssluGPU_t *sluGPU, SCT_t *SCT, gridinfo_t *grid, sLUstruct_t *LUstruct ) { sLocalLU_t *Llu = LUstruct->Llu; int iam = grid->iam; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t** Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; float** Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; int_t** Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; float** Unzval_br_ptr = Llu->Unzval_br_ptr; gpuStream_t CopyStream; sLUstruct_gpu_t *A_gpu; A_gpu = sluGPU->A_gpu; CopyStream = sluGPU->CopyStream; int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t mkrow = d2Hred->mkrow; int_t mkcol = d2Hred->mkcol; int_t ksup_size = d2Hred->ksup_size; int_t *kindex; if ((copyL_kljb || copyU_kljb) && last_flag ) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(CopyStream); SCT->PhiWaitTimer_2 += SuperLU_timer_() - ttx; } double tt_start = SuperLU_timer_(); if (last_flag) { if (mkcol == mycol && Lrowind_bc_ptr[kljb] != NULL ) { kindex = Lrowind_bc_ptr[kljb]; int_t len = kindex[1]; if (copyL_kljb) { float *nzval_host; nzval_host = Lnzval_bc_ptr[kljb]; int_t llen = ksup_size * len; float alpha = 1; superlu_saxpy (llen, alpha, A_gpu->acc_L_buff, 1, nzval_host, 1); } } } if (last_flag) { if (mkrow == myrow && Ufstnz_br_ptr[kijb] != NULL ) { kindex = Ufstnz_br_ptr[kijb]; int_t len = kindex[1]; if (copyU_kljb) { float *nzval_host; nzval_host = Unzval_br_ptr[kijb]; float alpha = 1; superlu_saxpy (len, alpha, A_gpu->acc_U_buff, 1, nzval_host, 1); } } } double tt_end = SuperLU_timer_(); SCT->AssemblyTimer += tt_end - tt_start; return 0; } /* sreduceGPUlu */ int swaitGPUscu(int streamId, ssluGPU_t *sluGPU, SCT_t *SCT) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; return 0; } int ssendLUpanelGPU2HOST( int_t k0, d2Hreduce_t* d2Hred, ssluGPU_t *sluGPU ) { int_t kljb = d2Hred->kljb; int_t kijb = d2Hred->kijb; int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; int_t l_copy_len = d2Hred->l_copy_len; int_t u_copy_len = d2Hred->u_copy_len; gpuStream_t CopyStream = sluGPU->CopyStream;; sLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; double tty = SuperLU_timer_(); gpuEventRecord(A_gpu->ePCIeD2H_Start[k0], CopyStream); if (copyL_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_L_buff, &A_gpu->LnzvalVec[A_gpu->LnzvalPtr_host[kljb]], l_copy_len * sizeof(float), gpuMemcpyDeviceToHost, CopyStream ) ); if (copyU_kljb) checkGPU(gpuMemcpyAsync(A_gpu->acc_U_buff, &A_gpu->UnzvalVec[A_gpu->UnzvalPtr_host[kijb]], u_copy_len * sizeof(float), gpuMemcpyDeviceToHost, CopyStream ) ); gpuEventRecord(A_gpu->ePCIeD2H_End[k0], CopyStream); A_gpu->tHost_PCIeD2H += SuperLU_timer_() - tty; A_gpu->cPCIeD2H += u_copy_len * sizeof(float) + l_copy_len * sizeof(float); return 0; } /* Copy L and U panel data structures from host to the host part of the data structures in A_gpu. GPU is not involved in this routine. 
*/ int ssendSCUdataHost2GPU( int_t streamId, int_t* lsub, int_t* usub, float* bigU, int_t bigu_send_size, int_t Remain_lbuf_send_size, ssluGPU_t *sluGPU, HyP_t* HyP ) { //{printf("....[enter] ssendSCUdataHost2GPU, bigu_send_size %d\n", bigu_send_size); fflush(stdout);} int_t usub_len = usub[2]; int_t lsub_len = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR; //{printf("....[2] in ssendSCUdataHost2GPU, lsub_len %d\n", lsub_len); fflush(stdout);} sLUstruct_gpu_t *A_gpu = sluGPU->A_gpu; memcpy(A_gpu->scubufs[streamId].lsub_buf, lsub, sizeof(int_t)*lsub_len); memcpy(A_gpu->scubufs[streamId].usub_buf, usub, sizeof(int_t)*usub_len); memcpy(A_gpu->scubufs[streamId].Remain_info_host, HyP->Remain_info, sizeof(Remain_info_t)*HyP->RemainBlk); memcpy(A_gpu->scubufs[streamId].Ublock_info_host, HyP->Ublock_info_Phi, sizeof(Ublock_info_t)*HyP->num_u_blks_Phi); memcpy(A_gpu->scubufs[streamId].Remain_L_buff_host, HyP->Remain_L_buff, sizeof(float)*Remain_lbuf_send_size); memcpy(A_gpu->scubufs[streamId].bigU_host, bigU, sizeof(float)*bigu_send_size); return 0; } /* Sherry: not used ?*/ #if 0 int freeSluGPU(ssluGPU_t *sluGPU) { return 0; } #endif /* Allocate GPU memory for the LU data structures, and copy the host LU structure to GPU side. After factorization, the GPU LU structure should be freed by calling sfree_LUsstruct_gpu(). */ void sCopyLUToGPU3D ( int* isNodeInMyGrid, sLocalLU_t *A_host, /* distributed LU structure on host */ ssluGPU_t *sluGPU, /* hold LU structure on GPU */ Glu_persist_t *Glu_persist, int_t n, gridinfo3d_t *grid3d, int_t buffer_size, /* bigV size on GPU for Schur complement update */ int_t bigu_size, int_t ldt ) { gridinfo_t* grid = &(grid3d->grid2d); sLUstruct_gpu_t * A_gpu = sluGPU->A_gpu; sLUstruct_gpu_t **dA_gpu = &(sluGPU->dA_gpu); #if ( PRNTlevel>=1 ) if ( grid3d->iam == 0 ) print_occupancy(); #endif #ifdef GPU_DEBUG // if ( grid3d->iam == 0 ) { gpuDeviceProp devProp; gpuGetDeviceProperties(&devProp, 0); printDevProp(devProp); } #endif int_t *xsup ; xsup = Glu_persist->xsup; int iam = grid->iam; int nsupers = Glu_persist->supno[n - 1] + 1; int_t Pc = grid->npcol; int_t Pr = grid->nprow; int_t myrow = MYROW (iam, grid); int_t mycol = MYCOL (iam, grid); int_t mrb = (nsupers + Pr - 1) / Pr; int_t mcb = (nsupers + Pc - 1) / Pc; int_t remain_l_max = A_host->bufmax[1]; /*copies of scalars for easy access*/ A_gpu->nsupers = nsupers; A_gpu->ScatterMOPCounter = 0; A_gpu->GemmFLOPCounter = 0; A_gpu->cPCIeH2D = 0; A_gpu->cPCIeD2H = 0; A_gpu->tHost_PCIeH2D = 0; A_gpu->tHost_PCIeD2H = 0; /*initializing memory*/ size_t max_gpu_memory = get_acc_memory (); size_t gpu_mem_used = 0; void *tmp_ptr; A_gpu->xsup_host = xsup; int_t nGPUStreams = sluGPU->nGPUStreams; /*pinned memory allocations. 
Paged-locked memory by gpuMallocHost is accessible to the device.*/ for (int streamId = 0; streamId < nGPUStreams; streamId++ ) { void *tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, (n) * sizeof(int_t) )) ; A_gpu->scubufs[streamId].usub_IndirectJ3_host = (int_t*) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, ( n) * sizeof(int_t) )); A_gpu->scubufs[streamId].usub_IndirectJ3 = (int_t*) tmp_ptr; gpu_mem_used += ( n) * sizeof(int_t); checkGPUErrors(gpuMallocHost( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info_host = (Remain_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info_host = (Ublock_info_t*)tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, remain_l_max * sizeof(float) )) ; A_gpu->scubufs[streamId].Remain_L_buff_host = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost( &tmp_ptr, bigu_size * sizeof(float) )) ; A_gpu->scubufs[streamId].bigU_host = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(float) * (A_host->bufmax[1]))); A_gpu->acc_L_buff = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(float) * (A_host->bufmax[3]))); A_gpu->acc_U_buff = (float *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[0]))); A_gpu->scubufs[streamId].lsub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMallocHost ( &tmp_ptr, sizeof(int_t) * (A_host->bufmax[2]))); A_gpu->scubufs[streamId].usub_buf = (int_t *) tmp_ptr; checkGPUErrors(gpuMalloc( &tmp_ptr, remain_l_max * sizeof(float) )) ; A_gpu->scubufs[streamId].Remain_L_buff = (float *) tmp_ptr; gpu_mem_used += remain_l_max * sizeof(float); checkGPUErrors(gpuMalloc( &tmp_ptr, bigu_size * sizeof(float) )) ; A_gpu->scubufs[streamId].bigU = (float *) tmp_ptr; gpu_mem_used += bigu_size * sizeof(float); checkGPUErrors(gpuMalloc( &tmp_ptr, mcb * sizeof(Ublock_info_t) )) ; A_gpu->scubufs[streamId].Ublock_info = (Ublock_info_t *) tmp_ptr; gpu_mem_used += mcb * sizeof(Ublock_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, mrb * sizeof(Remain_info_t) )) ; A_gpu->scubufs[streamId].Remain_info = (Remain_info_t *) tmp_ptr; gpu_mem_used += mrb * sizeof(Remain_info_t); checkGPUErrors(gpuMalloc( &tmp_ptr, buffer_size * sizeof(float))) ; A_gpu->scubufs[streamId].bigV = (float *) tmp_ptr; gpu_mem_used += buffer_size * sizeof(float); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[0]*sizeof(int_t))) ; A_gpu->scubufs[streamId].lsub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[0] * sizeof(int_t); checkGPUErrors(gpuMalloc( &tmp_ptr, A_host->bufmax[2]*sizeof(int_t))) ; A_gpu->scubufs[streamId].usub = (int_t *) tmp_ptr; gpu_mem_used += A_host->bufmax[2] * sizeof(int_t); } /* endfor streamID ... 
allocate paged-locked memory */ A_gpu->isOffloaded = (int *) SUPERLU_MALLOC (sizeof(int) * nsupers); A_gpu->GemmStart = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->GemmEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ScatterEnd = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ePCIeH2D = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ePCIeD2H_Start = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); A_gpu->ePCIeD2H_End = (gpuEvent_t *) SUPERLU_MALLOC(sizeof(gpuEvent_t) * nsupers); for (int i = 0; i < nsupers; ++i) { A_gpu->isOffloaded[i] = 0; checkGPUErrors(gpuEventCreate(&(A_gpu->GemmStart[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->GemmEnd[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ScatterEnd[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeH2D[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeH2D[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeD2H_Start[i]))); checkGPUErrors(gpuEventCreate(&(A_gpu->ePCIeD2H_End[i]))); } /*---- Copy L data structure to GPU ----*/ /*pointers and address of local blocks for easy accessibility */ local_l_blk_info_t *local_l_blk_infoVec; int_t * local_l_blk_infoPtr; local_l_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pc) * sizeof(int_t ) ); /* First pass: count total L blocks */ int_t cum_num_l_blocks = 0; /* total number of L blocks I own */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { /* going through each block column I own */ if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; cum_num_l_blocks += num_l_blocks; } } /*allocating memory*/ local_l_blk_infoVec = (local_l_blk_info_t *) malloc(cum_num_l_blocks * sizeof(local_l_blk_info_t)); /* Second pass: set up the meta-data for the L structure */ cum_num_l_blocks = 0; /*initialzing vectors */ for (int_t i = 0; i < CEILING(nsupers, Pc); ++i) { if (A_host->Lrowind_bc_ptr[i] != NULL && isNodeInMyGrid[i * Pc + mycol] == 1) { int_t *index = A_host->Lrowind_bc_ptr[i]; int_t num_l_blocks = index[0]; /* # L blocks in this column */ if (num_l_blocks > 0) { local_l_blk_info_t *local_l_blk_info_i = local_l_blk_infoVec + cum_num_l_blocks; local_l_blk_infoPtr[i] = cum_num_l_blocks; int_t lptrj = BC_HEADER; int_t luptrj = 0; for (int_t j = 0; j < num_l_blocks ; ++j) { int_t ijb = index[lptrj]; local_l_blk_info_i[j].lib = ijb / Pr; local_l_blk_info_i[j].lptrj = lptrj; local_l_blk_info_i[j].luptrj = luptrj; luptrj += index[lptrj + 1]; lptrj += LB_DESCRIPTOR + index[lptrj + 1]; } } cum_num_l_blocks += num_l_blocks; } } /* endfor all block columns */ /* Allocate L memory on GPU, and copy the values from CPU to GPU */ checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_l_blocks * sizeof(local_l_blk_info_t))) ; A_gpu->local_l_blk_infoVec = (local_l_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_l_blocks * sizeof(local_l_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoVec), local_l_blk_infoVec, cum_num_l_blocks * sizeof(local_l_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pc)*sizeof(int_t))) ; A_gpu->local_l_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pc) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_l_blk_infoPtr), local_l_blk_infoPtr, CEILING(nsupers, Pc)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /*---- Copy U data structure to GPU ----*/ local_u_blk_info_t *local_u_blk_infoVec; int_t * local_u_blk_infoPtr; 
local_u_blk_infoPtr = (int_t *) malloc( CEILING(nsupers, Pr) * sizeof(int_t ) ); /* First pass: count total U blocks */ int_t cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; cum_num_u_blocks += num_u_blocks; } } local_u_blk_infoVec = (local_u_blk_info_t *) malloc(cum_num_u_blocks * sizeof(local_u_blk_info_t)); /* Second pass: set up the meta-data for the U structure */ cum_num_u_blocks = 0; for (int_t i = 0; i < CEILING(nsupers, Pr); ++i) { if (A_host->Ufstnz_br_ptr[i] != NULL && isNodeInMyGrid[i * Pr + myrow] == 1) { int_t *index = A_host->Ufstnz_br_ptr[i]; int_t num_u_blocks = index[0]; if (num_u_blocks > 0) { local_u_blk_info_t *local_u_blk_info_i = local_u_blk_infoVec + cum_num_u_blocks; local_u_blk_infoPtr[i] = cum_num_u_blocks; int_t iuip_lib, ruip_lib; iuip_lib = BR_HEADER; ruip_lib = 0; for (int_t j = 0; j < num_u_blocks ; ++j) { int_t ijb = index[iuip_lib]; local_u_blk_info_i[j].ljb = ijb / Pc; local_u_blk_info_i[j].iuip = iuip_lib; local_u_blk_info_i[j].ruip = ruip_lib; ruip_lib += index[iuip_lib + 1]; iuip_lib += UB_DESCRIPTOR + SuperSize (ijb); } } cum_num_u_blocks += num_u_blocks; } } checkGPUErrors(gpuMalloc( &tmp_ptr, cum_num_u_blocks * sizeof(local_u_blk_info_t))) ; A_gpu->local_u_blk_infoVec = (local_u_blk_info_t *) tmp_ptr; gpu_mem_used += cum_num_u_blocks * sizeof(local_u_blk_info_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoVec), local_u_blk_infoVec, cum_num_u_blocks * sizeof(local_u_blk_info_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, CEILING(nsupers, Pr)*sizeof(int_t))) ; A_gpu->local_u_blk_infoPtr = (int_t *) tmp_ptr; gpu_mem_used += CEILING(nsupers, Pr) * sizeof(int_t); checkGPUErrors(gpuMemcpy( (A_gpu->local_u_blk_infoPtr), local_u_blk_infoPtr, CEILING(nsupers, Pr)*sizeof(int_t), gpuMemcpyHostToDevice)) ; /* Copy the actual L indices and values */ int_t l_k = CEILING( nsupers, grid->npcol ); /* # of local block columns */ int_t *temp_LrowindPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *temp_LnzvalPtr = (int_t *) malloc(sizeof(int_t) * l_k); int_t *Lnzval_size = (int_t *) malloc(sizeof(int_t) * l_k); int_t l_ind_len = 0; int_t l_val_len = 0; for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... */ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; temp_LrowindPtr[ljb] = l_ind_len; temp_LnzvalPtr[ljb] = l_val_len; // ### Lnzval_size[ljb] = 0; //### if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; /* Global block number is mycol + ljb*Pc */ int_t nsupc = SuperSize(jb); l_ind_len += len1; l_val_len += len * nsupc; Lnzval_size[ljb] = len * nsupc ; // ### } else { Lnzval_size[ljb] = 0 ; // ### } } } /* endfor jb = 0 ... 
*/ /* Copy the actual U indices and values */ int_t u_k = CEILING( nsupers, grid->nprow ); /* Number of local block rows */ int_t *temp_UrowindPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *temp_UnzvalPtr = (int_t *) malloc(sizeof(int_t) * u_k); int_t *Unzval_size = (int_t *) malloc(sizeof(int_t) * u_k); int_t u_ind_len = 0; int_t u_val_len = 0; for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; temp_UrowindPtr[lb] = u_ind_len; temp_UnzvalPtr[lb] = u_val_len; Unzval_size[lb] = 0; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len = index_host[1]; int_t len1 = index_host[2]; u_ind_len += len1; u_val_len += len; Unzval_size[lb] = len; } else { Unzval_size[lb] = 0; } } gpu_mem_used += l_ind_len * sizeof(int_t); gpu_mem_used += 2 * l_k * sizeof(int_t); gpu_mem_used += u_ind_len * sizeof(int_t); gpu_mem_used += 2 * u_k * sizeof(int_t); /*left memory shall be divided among the two */ for (int_t i = 0; i < l_k; ++i) { temp_LnzvalPtr[i] = -1; } for (int_t i = 0; i < u_k; ++i) { temp_UnzvalPtr[i] = -1; } /*setting these pointers back */ l_val_len = 0; u_val_len = 0; int_t num_gpu_l_blocks = 0; int_t num_gpu_u_blocks = 0; size_t mem_l_block, mem_u_block; /* Find the trailing matrix size that can fit into GPU memory */ for (int_t i = nsupers - 1; i > -1; --i) { /* ulte se chalte hai eleimination tree */ /* bottom up ordering */ int_t i_sup = A_gpu->perm_c_supno[i]; int_t pc = PCOL( i_sup, grid ); if (isNodeInMyGrid[i_sup] == 1) { if (mycol == pc ) { int_t ljb = LBj(i_sup, grid); mem_l_block = sizeof(float) * Lnzval_size[ljb]; if (gpu_mem_used + mem_l_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_l_block; temp_LnzvalPtr[ljb] = l_val_len; l_val_len += Lnzval_size[ljb]; num_gpu_l_blocks++; A_gpu->first_l_block_gpu = i; } } int_t pr = PROW( i_sup, grid ); if (myrow == pr) { int_t lib = LBi(i_sup, grid); mem_u_block = sizeof(float) * Unzval_size[lib]; if (gpu_mem_used + mem_u_block > max_gpu_memory) { break; } else { gpu_mem_used += mem_u_block; temp_UnzvalPtr[lib] = u_val_len; u_val_len += Unzval_size[lib]; num_gpu_u_blocks++; A_gpu->first_u_block_gpu = i; } } } /* endif */ } /* endfor i .... nsupers */ #if (PRNTlevel>=2) printf("(%d) Number of L blocks in GPU %d, U blocks %d\n", grid3d->iam, num_gpu_l_blocks, num_gpu_u_blocks ); printf("(%d) elimination order of first block in GPU: L block %d, U block %d\n", grid3d->iam, A_gpu->first_l_block_gpu, A_gpu->first_u_block_gpu); printf("(%d) Memory of L %.1f GB, memory for U %.1f GB, Total device memory used %.1f GB, Memory allowed %.1f GB \n", grid3d->iam, l_val_len * sizeof(float) * 1e-9, u_val_len * sizeof(float) * 1e-9, gpu_mem_used * 1e-9, max_gpu_memory * 1e-9); fflush(stdout); #endif /* Assemble index vector on temp */ int_t *indtemp = (int_t *) malloc(sizeof(int_t) * l_ind_len); for (int_t jb = 0; jb < nsupers; ++jb) /* for each block column ... 
*/ { int_t pc = PCOL( jb, grid ); if (mycol == pc && isNodeInMyGrid[jb] == 1) { int_t ljb = LBj( jb, grid ); /* Local block number */ int_t *index_host; index_host = A_host->Lrowind_bc_ptr[ljb]; if (index_host != NULL) { int_t nrbl = index_host[0]; /* number of L blocks */ int_t len = index_host[1]; /* LDA of the nzval[] */ int_t len1 = len + BC_HEADER + nrbl * LB_DESCRIPTOR; memcpy(&indtemp[temp_LrowindPtr[ljb]] , index_host, len1 * sizeof(int_t)) ; } } } checkGPUErrors(gpuMalloc( &tmp_ptr, l_ind_len * sizeof(int_t))) ; A_gpu->LrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindVec), indtemp, l_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_val_len * sizeof(float))); A_gpu->LnzvalVec = (float *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->LnzvalVec), 0, l_val_len * sizeof(float))); checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LrowindPtr), temp_LrowindPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, l_k * sizeof(int_t))) ; A_gpu->LnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->LnzvalPtr), temp_LnzvalPtr, l_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->LnzvalPtr_host = temp_LnzvalPtr; int_t *indtemp1 = (int_t *) malloc(sizeof(int_t) * u_ind_len); for ( int_t lb = 0; lb < u_k; ++lb) { int_t *index_host; index_host = A_host->Ufstnz_br_ptr[lb]; if (index_host != NULL && isNodeInMyGrid[lb * Pr + myrow] == 1) { int_t len1 = index_host[2]; memcpy(&indtemp1[temp_UrowindPtr[lb]] , index_host, sizeof(int_t)*len1); } } checkGPUErrors(gpuMalloc( &tmp_ptr, u_ind_len * sizeof(int_t))) ; A_gpu->UrowindVec = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindVec), indtemp1, u_ind_len * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, u_val_len * sizeof(float))); A_gpu->UnzvalVec = (float *) tmp_ptr; checkGPUErrors(gpuMemset( (A_gpu->UnzvalVec), 0, u_val_len * sizeof(float))); checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UrowindPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UrowindPtr), temp_UrowindPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; A_gpu->UnzvalPtr_host = temp_UnzvalPtr; checkGPUErrors(gpuMalloc( &tmp_ptr, u_k * sizeof(int_t))) ; A_gpu->UnzvalPtr = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->UnzvalPtr), temp_UnzvalPtr, u_k * sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, (nsupers + 1)*sizeof(int_t))) ; A_gpu->xsup = (int_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( (A_gpu->xsup), xsup, (nsupers + 1)*sizeof(int_t), gpuMemcpyHostToDevice)) ; checkGPUErrors(gpuMalloc( &tmp_ptr, sizeof(sLUstruct_gpu_t))) ; *dA_gpu = (sLUstruct_gpu_t *) tmp_ptr; checkGPUErrors(gpuMemcpy( *dA_gpu, A_gpu, sizeof(sLUstruct_gpu_t), gpuMemcpyHostToDevice)) ; free (temp_LrowindPtr); free (temp_UrowindPtr); free (indtemp1); free (indtemp); } /* end sCopyLUToGPU3D */ int sreduceAllAncestors3d_GPU(int_t ilvl, int_t* myNodeCount, int_t** treePerm, sLUValSubBuf_t*LUvsb, sLUstruct_t* LUstruct, gridinfo3d_t* grid3d, ssluGPU_t *sluGPU, d2Hreduce_t* d2Hred, factStat_t *factStat, HyP_t* HyP, SCT_t* SCT ) { // first synchronize all gpu streams int superlu_acc_offload = HyP->superlu_acc_offload; int_t maxLvl = log2i( (int_t) grid3d->zscp.Np) + 1; int_t myGrid = grid3d->zscp.Iam; gridinfo_t* grid = &(grid3d->grid2d); int_t* gpuLUreduced = factStat->gpuLUreduced; int_t sender; if ((myGrid % (1 << (ilvl + 1))) == 0) { 
sender = myGrid + (1 << ilvl); } else { sender = myGrid; } /*Reduce all the ancestors from the GPU*/ if (myGrid == sender && superlu_acc_offload) { for (int_t streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } for (int_t alvl = ilvl + 1; alvl < maxLvl; ++alvl) { /* code */ // int_t atree = myTreeIdxs[alvl]; int_t nsAncestor = myNodeCount[alvl]; int_t* cAncestorList = treePerm[alvl]; for (int_t node = 0; node < nsAncestor; node++ ) { int_t k = cAncestorList[node]; if (!gpuLUreduced[k]) { sinitD2Hreduce(k, d2Hred, 1, HyP, sluGPU, grid, LUstruct, SCT); int_t copyL_kljb = d2Hred->copyL_kljb; int_t copyU_kljb = d2Hred->copyU_kljb; double tt_start1 = SuperLU_timer_(); SCT->PhiMemCpyTimer += SuperLU_timer_() - tt_start1; if (copyL_kljb || copyU_kljb) SCT->PhiMemCpyCounter++; ssendLUpanelGPU2HOST(k, d2Hred, sluGPU); /* Reduce the LU panels from GPU */ sreduceGPUlu(1, d2Hred, sluGPU, SCT, grid, LUstruct); gpuLUreduced[k] = 1; } } } } /*if (myGrid == sender)*/ sreduceAllAncestors3d(ilvl, myNodeCount, treePerm, LUvsb, LUstruct, grid3d, SCT ); return 0; } /* sreduceAllAncestors3d_GPU */ void ssyncAllfunCallStreams(ssluGPU_t* sluGPU, SCT_t* SCT) { for (int streamId = 0; streamId < sluGPU->nGPUStreams; streamId++) { double ttx = SuperLU_timer_(); gpuStreamSynchronize(sluGPU->funCallStreams[streamId]); SCT->PhiWaitTimer += SuperLU_timer_() - ttx; sluGPU->lastOffloadStream[streamId] = -1; } }
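/*
 * Note (added for illustration): Scatter_GPU_kernel above relies on
 * incScan(), the block-wide inclusive prefix sum that replaced the CUB
 * BlockScan dependence (see the header comment) and is defined in
 * superlu_gpu_utils.cu / superlu_gpu_utils.hip.cpp, included near the top
 * of this file.  The function below is NOT that implementation -- it is a
 * minimal Hillis-Steele style sketch, assuming n <= blockDim.x and a
 * caller-provided scratch buffer of at least n ints (the kernel launch
 * reserves 2*SCATTER_THREAD_BLOCK_SIZE ints of dynamic shared memory for
 * this purpose).  It is shown only to document the technique the kernel
 * depends on.
 */
__device__ static void incScan_sketch(int *data, int *scratch, int n)
{
    int tid = threadIdx.x;
    for (int offset = 1; offset < n; offset <<= 1)
    {
        int val = 0;
        if (tid < n)
        {
            /* inclusive scan step: add the element 'offset' slots back */
            val = data[tid];
            if (tid >= offset) val += data[tid - offset];
            scratch[tid] = val;
        }
        __syncthreads();               /* all reads of data[] finish before writes */
        if (tid < n) data[tid] = scratch[tid];
        __syncthreads();
    }
}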
bc290788a053c5c852bb618b1736e2fb60673239.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_utility.cu" #define threadsPerBlock 32 #define IFOR(v, s, e) for(int v = s; v < e; ++v) #define UFOR(v, s, e) for(unsigned v = s; v < e; v++) #define IFORS(v, s, e, step) for(int v = s; v < e; v += step) using namespace std; class ParallelUtility { public: void init_2D_mat(double **(&arr), int row, int col) { arr = new (double*) [row * sizeof(double *)]; IFOR(i, 0, row) arr[i] = new double [col * sizeof(double)]; } double* serialize_2D_mat(double **mat, int r, int c) { int k = 0; double *result = new double[r*c]; IFOR(i, 0, r) IFOR(j, 0, c) { result[k] = mat[i][j]; ++k; } return result; } double **deserialize_2D_mat(double *arr, int r, int c) { int k = 0; double **res = NULL; init_2D_mat(res, r, c); IFOR(i, 0, r) IFOR(j, 0, c) res[i][j] = arr[k++]; return res; } void block_and_grid_dim_get(int len, size_t& block_size, size_t& num_blocks) { block_size = threadsPerBlock; num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); } double **cuda_mat_transpose_helper(double **hostA, int numARows, int numAColumns) { double *hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); int numCRows = numAColumns, numCColumns = numARows; double *deviceA = NULL, *deviceC = NULL; hipMalloc((void**) &deviceA, numARows * numAColumns * sizeof(double)); hipMalloc((void**) &deviceC, numCRows * numCColumns * sizeof(double)); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( cuda_mat_transpose), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceC, numAColumns, numARows, numARows * numAColumns); hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); hipFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double **cuda_mat_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns, int numBRows, int numBColumns) { int numCRows = numARows, numCColumns = numBColumns; double* hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *devA = NULL, *devB = NULL, *devC = NULL; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numBRows, numBColumns); hipMalloc((void**) &devA, numARows * numAColumns * sizeof(double)); hipMalloc((void**) &devB, numBRows * numBColumns * sizeof(double)); hipMalloc((void**) &devC, numCRows * numCColumns * sizeof(double)); hipMemcpy(devA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(devB, hostB_serial, numBRows * numBColumns * sizeof(double), hipMemcpyHostToDevice); dim3 dimGrid(1 + (numCColumns/32), 1 + (numCRows/32), 1); dim3 dimBlock(32, 32, 1); hipLaunchKernelGGL(( cuda_mat_multiply) , dim3(dimGrid), dim3(dimBlock), 0, 0, devA, devB, devC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipMemcpy(hostC, devC, numCRows * numCColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(devA); hipFree(devB); hipFree(devC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double **cu_addition_helper(double **hostA, double **hostB, int numARows, int numAColumns) { double * hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); double *deviceA, *deviceB, *deviceC; double 
*hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns); int numCRows = numARows, numCColumns = numAColumns; hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); hipMalloc((void **)&deviceB, numARows * numAColumns * sizeof(double)); hipMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( cu_addition), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceB, deviceC, numARows * numAColumns); hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceB); hipFree(deviceC); hipFree(deviceA); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double** cu_mat_scalar_multiply_helper(double **hostA, double scalar, int numARows, int numAColumns) { double *deviceA = NULL; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( cu_mat_scalar_multiply), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, scalar, numARows * numAColumns); hipMemcpy(hostA_serial, deviceA, numARows * numAColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); return deserialize_2D_mat(hostA_serial, numARows, numAColumns); } double** cu_mat_elementwise_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns) { double *deviceA = NULL, *deviceB = NULL; double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns); double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); hipMalloc((void **)&deviceB, numARows * numAColumns * sizeof(double)); hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); hipMemcpy(deviceB, hostB_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( cu_elementWiseMultiply), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceB, numARows * numAColumns); hipMemcpy(hostA_serial, deviceA, numARows * numAColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceB); hipFree(deviceA); return deserialize_2D_mat(hostA_serial, numARows, numAColumns); } double **cu_sigmoid_helper(double **hostA, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double * hostC, * deviceA, * deviceC; int numCRows = numAColumns, numCColumns = numARows; hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); hipMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( 
cu_sigmoid), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceC, numARows * numAColumns); hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); hipFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double** cu_2D_1D_addition_helper(double **hostA, double *hostB, int numARows, int numAColumns) { double *hostB_converted = (double*) malloc (numARows * numAColumns * sizeof(double)); double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); int k = 0; IFOR(i, 0, numAColumns) { IFOR(j, 0, numARows) { hostB_converted[k++] = hostB[i]; } } double *hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *deviceA, *deviceB, *deviceC; int numCRows = numARows, numCColumns = numAColumns; hipMalloc((void **)&deviceB, numARows * numAColumns * sizeof(double)); hipMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); hipMemcpy(deviceB, hostB_converted, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( cu_addition), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, hostB_converted, deviceC, numARows * numAColumns); hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double *cu_vec_addition_helper(double *hostA, double *hostB, int n){ double *hostC = (double *) malloc(sizeof(double) * n); double *deviceA = NULL, *deviceB = NULL, *deviceC = NULL; hipMalloc((void **)&deviceA, n * sizeof(double)); hipMalloc((void **)&deviceB, n * sizeof(double)); hipMalloc((void **)&deviceC, n * sizeof(double)); hipMemcpy(deviceA, hostA, n * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, n * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(n, block_size, num_blocks); hipLaunchKernelGGL(( cu_addition), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceB, deviceC, n); hipMemcpy(hostC, deviceC, n * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); return hostC; } double **cu_dsigmoid_helper(double **hostA, int numARows, int numAColumns) { double *hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *deviceA = NULL, *deviceC = NULL; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); int numCRows = numAColumns, numCColumns = numARows; hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); hipMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); hipMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); hipLaunchKernelGGL(( cu_dsigmoid), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, deviceC, numARows * numAColumns); hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); hipFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double* cu_vec_scalar_multiply_helper(double *hostA, double scalar, int n){ double * deviceA; hipMalloc((void **)&deviceA, n * 
sizeof(double)); hipMemcpy(deviceA, hostA, n * sizeof(double), hipMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(n, block_size, num_blocks); hipLaunchKernelGGL(( cu_mat_scalar_multiply), dim3(num_blocks), dim3(block_size), 0, 0, deviceA, scalar, n); hipMemcpy(hostA, deviceA, n * sizeof(double), hipMemcpyDeviceToHost); hipFree(deviceA); return hostA; } };
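/*
 * Note (added for illustration): every helper above discards the hipError_t
 * returned by hipMalloc/hipMemcpy/hipFree and by the kernel launches.  A
 * minimal checking macro is sketched below; HIP_CHECK is an assumption --
 * it is not defined anywhere in this file pair -- and it uses only the
 * standard HIP runtime API (hipSuccess, hipGetErrorString) already pulled
 * in through hip/hip_runtime.h at the top of this file.
 */
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);          /* evaluate the runtime call */  \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                    \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)   /* do/while(0) keeps the macro usable as one statement */

/* Example use inside the helpers above:
 *   HIP_CHECK(hipMalloc((void **)&deviceA, n * sizeof(double)));
 *   HIP_CHECK(hipMemcpy(deviceA, hostA, n * sizeof(double),
 *                       hipMemcpyHostToDevice));
 */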
bc290788a053c5c852bb618b1736e2fb60673239.cu
#include "cuda_utility.cu" #define threadsPerBlock 32 #define IFOR(v, s, e) for(int v = s; v < e; ++v) #define UFOR(v, s, e) for(unsigned v = s; v < e; v++) #define IFORS(v, s, e, step) for(int v = s; v < e; v += step) using namespace std; class ParallelUtility { public: void init_2D_mat(double **(&arr), int row, int col) { arr = new (double*) [row * sizeof(double *)]; IFOR(i, 0, row) arr[i] = new double [col * sizeof(double)]; } double* serialize_2D_mat(double **mat, int r, int c) { int k = 0; double *result = new double[r*c]; IFOR(i, 0, r) IFOR(j, 0, c) { result[k] = mat[i][j]; ++k; } return result; } double **deserialize_2D_mat(double *arr, int r, int c) { int k = 0; double **res = NULL; init_2D_mat(res, r, c); IFOR(i, 0, r) IFOR(j, 0, c) res[i][j] = arr[k++]; return res; } void block_and_grid_dim_get(int len, size_t& block_size, size_t& num_blocks) { block_size = threadsPerBlock; num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0); } double **cuda_mat_transpose_helper(double **hostA, int numARows, int numAColumns) { double *hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); int numCRows = numAColumns, numCColumns = numARows; double *deviceA = NULL, *deviceC = NULL; cudaMalloc((void**) &deviceA, numARows * numAColumns * sizeof(double)); cudaMalloc((void**) &deviceC, numCRows * numCColumns * sizeof(double)); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cuda_mat_transpose<<<num_blocks, block_size>>>(deviceA, deviceC, numAColumns, numARows, numARows * numAColumns); cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); cudaFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double **cuda_mat_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns, int numBRows, int numBColumns) { int numCRows = numARows, numCColumns = numBColumns; double* hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *devA = NULL, *devB = NULL, *devC = NULL; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numBRows, numBColumns); cudaMalloc((void**) &devA, numARows * numAColumns * sizeof(double)); cudaMalloc((void**) &devB, numBRows * numBColumns * sizeof(double)); cudaMalloc((void**) &devC, numCRows * numCColumns * sizeof(double)); cudaMemcpy(devA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(devB, hostB_serial, numBRows * numBColumns * sizeof(double), cudaMemcpyHostToDevice); dim3 dimGrid(1 + (numCColumns/32), 1 + (numCRows/32), 1); dim3 dimBlock(32, 32, 1); cuda_mat_multiply <<<dimGrid, dimBlock>>> (devA, devB, devC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaMemcpy(hostC, devC, numCRows * numCColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(devA); cudaFree(devB); cudaFree(devC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double **cu_addition_helper(double **hostA, double **hostB, int numARows, int numAColumns) { double * hostC = (double *) malloc(sizeof(double)*numCRows*numCColumns); double *deviceA, *deviceB, *deviceC; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns); int 
numCRows = numARows, numCColumns = numAColumns; cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); cudaMalloc((void **)&deviceB, numARows * numAColumns * sizeof(double)); cudaMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cu_addition<<<num_blocks, block_size>>>(deviceA, deviceB, deviceC, numARows * numAColumns); cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceB); cudaFree(deviceC); cudaFree(deviceA); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double** cu_mat_scalar_multiply_helper(double **hostA, double scalar, int numARows, int numAColumns) { double *deviceA = NULL; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cu_mat_scalar_multiply<<<num_blocks, block_size>>>(deviceA, scalar, numARows * numAColumns); cudaMemcpy(hostA_serial, deviceA, numARows * numAColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); return deserialize_2D_mat(hostA_serial, numARows, numAColumns); } double** cu_mat_elementwise_multiply_helper(double **hostA, double **hostB, int numARows, int numAColumns) { double *deviceA = NULL, *deviceB = NULL; double *hostB_serial = serialize_2D_mat(hostB, numARows, numAColumns); double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); cudaMalloc((void **)&deviceB, numARows * numAColumns * sizeof(double)); cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); cudaMemcpy(deviceB, hostB_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cu_elementWiseMultiply<<<num_blocks, block_size>>>(deviceA, deviceB, numARows * numAColumns); cudaMemcpy(hostA_serial, deviceA, numARows * numAColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceB); cudaFree(deviceA); return deserialize_2D_mat(hostA_serial, numARows, numAColumns); } double **cu_sigmoid_helper(double **hostA, int numARows, int numAColumns){ double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); double * hostC, * deviceA, * deviceC; int numCRows = numAColumns, numCColumns = numARows; hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); cudaMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cu_sigmoid<<<num_blocks, block_size>>>(deviceA, deviceC, numARows * numAColumns); cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); cudaFree(deviceC); return deserialize_2D_mat(hostC, 
numCRows, numCColumns); } double** cu_2D_1D_addition_helper(double **hostA, double *hostB, int numARows, int numAColumns) { double *hostB_converted = (double*) malloc (numARows * numAColumns * sizeof(double)); double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); int k = 0; IFOR(i, 0, numAColumns) { IFOR(j, 0, numARows) { hostB_converted[k++] = hostB[i]; } } double *hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *deviceA, *deviceB, *deviceC; int numCRows = numARows, numCColumns = numAColumns; cudaMalloc((void **)&deviceB, numARows * numAColumns * sizeof(double)); cudaMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); cudaMemcpy(deviceB, hostB_converted, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cu_addition<<<num_blocks, block_size>>>(deviceA, hostB_converted, deviceC, numARows * numAColumns); cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double *cu_vec_addition_helper(double *hostA, double *hostB, int n){ double *hostC = (double *) malloc(sizeof(double) * n); double *deviceA = NULL, *deviceB = NULL, *deviceC = NULL; cudaMalloc((void **)&deviceA, n * sizeof(double)); cudaMalloc((void **)&deviceB, n * sizeof(double)); cudaMalloc((void **)&deviceC, n * sizeof(double)); cudaMemcpy(deviceA, hostA, n * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, n * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(n, block_size, num_blocks); cu_addition<<<num_blocks, block_size>>>(deviceA, deviceB, deviceC, n); cudaMemcpy(hostC, deviceC, n * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); return hostC; } double **cu_dsigmoid_helper(double **hostA, int numARows, int numAColumns) { double *hostC = (double *) malloc(numCRows * numCColumns * sizeof(double)); double *deviceA = NULL, *deviceC = NULL; double *hostA_serial = serialize_2D_mat(hostA, numARows, numAColumns); int numCRows = numAColumns, numCColumns = numARows; cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(double)); cudaMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(double)); cudaMemcpy(deviceA, hostA_serial, numARows * numAColumns * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(numARows * numAColumns, block_size, num_blocks); cu_dsigmoid<<<num_blocks, block_size>>>(deviceA, deviceC, numARows * numAColumns); cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); cudaFree(deviceC); return deserialize_2D_mat(hostC, numCRows, numCColumns); } double* cu_vec_scalar_multiply_helper(double *hostA, double scalar, int n){ double * deviceA; cudaMalloc((void **)&deviceA, n * sizeof(double)); cudaMemcpy(deviceA, hostA, n * sizeof(double), cudaMemcpyHostToDevice); size_t block_size, num_blocks; block_and_grid_dim_get(n, block_size, num_blocks); cu_mat_scalar_multiply<<<num_blocks, block_size>>>(deviceA, scalar, n); cudaMemcpy(hostA, deviceA, n * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(deviceA); 
return hostA; } };
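The kernels launched by these helpers (cuda_mat_transpose, cuda_mat_multiply, cu_addition, cu_sigmoid, and the rest) are defined in cuda_utility.cu, which is not part of this listing. A minimal sketch of what three of the element-wise ones presumably look like, with signatures inferred only from the call sites above; the real definitions may differ:

// Hypothetical reconstructions of three kernels from cuda_utility.cu, based only
// on how the helpers above invoke them; argument order follows the call sites.
__global__ void cu_addition(double *a, double *b, double *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];                      // element-wise sum
}

__global__ void cu_mat_scalar_multiply(double *a, double scalar, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] *= scalar;                          // in-place scaling
}

__global__ void cu_sigmoid(double *in, double *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 1.0 / (1.0 + exp(-in[i]));      // logistic activation
}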
d856df3a6d0ed87b1a312fabf1781f23d24b5886.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void rdiv_float(int n, float *a, float *b, float *sum) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        sum[i] = b[i] / a[i];
    }
}
d856df3a6d0ed87b1a312fabf1781f23d24b5886.cu
extern "C" __global__ void rdiv_float(int n, float *a, float *b, float *sum) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { sum[i] = b[i] / a[i]; } }
c8a09807b750b2fa2a3e214ea67bbffd3e2dac08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ /* Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication and is exactly the same as * Chapter 7 of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * CUBLAS provides high-performance matrix multiplication. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <omp.h> // includes, project #include "matrixMul.h" // includes, kernels #include "matrixMul_kernel.cuh" #include "matrixMul_naive.cuh" #include "matrixMul_tiling.cuh" #include "matrixMul_coalescing.cuh" #include "matrixMul_noBankConflict.cuh" #include "matrixMul_compOpt.cuh" #include "matrixMul_unroll.cuh" #include "matrixMul_prefetch.cuh" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int testNum); void randomInit(float*, int); void printDiff(float*, float*, int, int); //extern "C" //void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { #pragma omp parallel for for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; //hipSetDevice(devID); hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } for(int i = 0;i<=7;i++) runTest(i); exit(EXIT_SUCCESS); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int testNum) { /****************************************************/ /* Preparations */ /****************************************************/ printf("[CUDA Matrix Multiply Using Version %d ] - Starting ...\n", testNum); // utilities hipEvent_t start; hipEvent_t stop; float msecTotal; // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A);//galima naudoti ir new float[size_A] unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); float flop = 2 * (float)WC * (float)HC * (float)WA; // set seed for rand() srand(2017); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A; hipMalloc((void**)&d_A, mem_size_A); float* d_B; hipMalloc((void**)&d_B, mem_size_B); // allocate device memory for result unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(float) * size_C; float* d_C; hipMalloc((void**)&d_C, mem_size_C); // allocate host memory for the result float* h_C = (float*)malloc(mem_size_C); #if CHECK_RESULT == 1 // create and start timer /*hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, NULL);*/ double t1 = omp_get_wtime(); // compute reference solution float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, HA, WA, WB); // stop and destroy timer double t2 = omp_get_wtime(); /*hipEventRecord(stop, NULL); hipEventSynchronize(start); hipEventSynchronize(stop); hipEventElapsedTime(&msecTotal, start, stop);*/ msecTotal =1000.* (t2 - t1) ; //printf("Naive CPU (Golden Reference)\n"); printf("CPU processing time: %f (ms), GFLOPS: %f \n", msecTotal, ((flop/1.e+9) / (msecTotal / 1.e+3))); //printf("Processing time2: %f (ms), \n", t2-t1); #endif dim3 threads, grid; /****************************************************/ /* CUDA SDK example */ /****************************************************/ // create and start timer // copy host memory to device // setup execution parameters hipEventCreate(&start); hipEventRecord(start, NULL); t1 = omp_get_wtime(); hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); threads = dim3(BLOCK_SIZE, BLOCK_SIZE); grid = dim3(WC / threads.x, HC / threads.y); // execute the kernel switch (testNum) { case 0: hipLaunchKernelGGL(( matrixMul) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 1: hipLaunchKernelGGL(( matrixMul_naive) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 2: hipLaunchKernelGGL(( matrixMul_tiling) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 3: hipLaunchKernelGGL(( matrixMul_coalescing) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 4: hipLaunchKernelGGL(( matrixMul_noBankConflict) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 5: threads = dim3(BLOCK_SIZE, 4); grid = dim3(WC / (BLOCK_SIZE * 4), HC / BLOCK_SIZE); hipLaunchKernelGGL(( matrixMul_compOpt) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 6: threads = dim3(BLOCK_SIZE, 4); grid = dim3(WC / (BLOCK_SIZE * 4), HC / BLOCK_SIZE); hipLaunchKernelGGL(( matrixMul_unroll) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; case 7: threads = dim3(BLOCK_SIZE, 4); grid = dim3(WC / 
(BLOCK_SIZE * 4), HC / BLOCK_SIZE); hipLaunchKernelGGL(( matrixMul_prefetch) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, WA, WB); break; } // copy result from device to host hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); hipEventCreate(&stop); hipEventRecord(stop, NULL); hipEventSynchronize(start); hipEventSynchronize(stop); // stop and destroy timer hipEventElapsedTime(&msecTotal, start, stop); t2 = omp_get_wtime(); // msecTotal =1000.*(t2-t1); //printf("GPU SDK Sample\n"); printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal / 1e+6); #if CHECK_RESULT == 1 // check result printDiff(reference, h_C, WC, HC); #endif /****************************************************/ /* Cleaning */ /****************************************************/ // clean up memory free(h_A); free(h_B); free(h_C); #if CHECK_RESULT == 1 free(reference); #endif hipFree(d_A); hipFree(d_B); hipFree(d_C); //hipDeviceReset(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = ((float)rand()) / (float)RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height) { int i, j, k; int error_count = 0; for (j = 0; j<height; j++) { for (i = 0; i<width; i++) { k = j*width + i; if (fabs(data1[k] - data2[k]) > 0.1) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", i, j, data1[k], data2[k]); error_count++; if(error_count>2) return; } } } printf("Total Errors = %d \n", error_count); }
c8a09807b750b2fa2a3e214ea67bbffd3e2dac08.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ /* Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication and is exactly the same as * Chapter 7 of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * CUBLAS provides high-performance matrix multiplication. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <omp.h> // includes, project #include "matrixMul.h" // includes, kernels #include "matrixMul_kernel.cuh" #include "matrixMul_naive.cuh" #include "matrixMul_tiling.cuh" #include "matrixMul_coalescing.cuh" #include "matrixMul_noBankConflict.cuh" #include "matrixMul_compOpt.cuh" #include "matrixMul_unroll.cuh" #include "matrixMul_prefetch.cuh" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int testNum); void randomInit(float*, int); void printDiff(float*, float*, int, int); //extern "C" //void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { #pragma omp parallel for for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; //cudaSetDevice(devID); cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } for(int i = 0;i<=7;i++) runTest(i); exit(EXIT_SUCCESS); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int testNum) { /****************************************************/ /* Preparations */ /****************************************************/ printf("[CUDA Matrix Multiply Using Version %d ] - Starting ...\n", testNum); // utilities cudaEvent_t start; cudaEvent_t stop; float msecTotal; // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A);//galima naudoti ir new float[size_A] unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); float flop = 2 * (float)WC * (float)HC * (float)WA; // set seed for rand() srand(2017); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A; cudaMalloc((void**)&d_A, mem_size_A); float* d_B; cudaMalloc((void**)&d_B, mem_size_B); // allocate device memory for result unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(float) * size_C; float* d_C; cudaMalloc((void**)&d_C, mem_size_C); // allocate host memory for the result float* h_C = (float*)malloc(mem_size_C); #if CHECK_RESULT == 1 // create and start timer /*cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, NULL);*/ double t1 = omp_get_wtime(); // compute reference solution float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, HA, WA, WB); // stop and destroy timer double t2 = omp_get_wtime(); /*cudaEventRecord(stop, NULL); cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&msecTotal, start, stop);*/ msecTotal =1000.* (t2 - t1) ; //printf("Naive CPU (Golden Reference)\n"); printf("CPU processing time: %f (ms), GFLOPS: %f \n", msecTotal, ((flop/1.e+9) / (msecTotal / 1.e+3))); //printf("Processing time2: %f (ms), \n", t2-t1); #endif dim3 threads, grid; /****************************************************/ /* CUDA SDK example */ /****************************************************/ // create and start timer // copy host memory to device // setup execution parameters cudaEventCreate(&start); cudaEventRecord(start, NULL); t1 = omp_get_wtime(); cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); threads = dim3(BLOCK_SIZE, BLOCK_SIZE); grid = dim3(WC / threads.x, HC / threads.y); // execute the kernel switch (testNum) { case 0: matrixMul <<< grid, threads >>> (d_C, d_A, d_B, WA, WB); break; case 1: matrixMul_naive <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; case 2: matrixMul_tiling <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; case 3: matrixMul_coalescing <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; case 4: matrixMul_noBankConflict <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; case 5: threads = dim3(BLOCK_SIZE, 4); grid = dim3(WC / (BLOCK_SIZE * 4), HC / BLOCK_SIZE); matrixMul_compOpt <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; case 6: threads = dim3(BLOCK_SIZE, 4); grid = dim3(WC / (BLOCK_SIZE * 4), HC / BLOCK_SIZE); matrixMul_unroll <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; case 7: threads = dim3(BLOCK_SIZE, 4); grid = dim3(WC / (BLOCK_SIZE * 4), HC / BLOCK_SIZE); matrixMul_prefetch <<< grid, threads >>>(d_C, d_A, d_B, WA, WB); break; } // copy result from device to host cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); cudaEventCreate(&stop); 
cudaEventRecord(stop, NULL); cudaEventSynchronize(start); cudaEventSynchronize(stop); // stop and destroy timer cudaEventElapsedTime(&msecTotal, start, stop); t2 = omp_get_wtime(); // msecTotal =1000.*(t2-t1); //printf("GPU SDK Sample\n"); printf("Processing time: %f (ms), GFLOPS: %f \n", msecTotal, flop / msecTotal / 1e+6); #if CHECK_RESULT == 1 // check result printDiff(reference, h_C, WC, HC); #endif /****************************************************/ /* Cleaning */ /****************************************************/ // clean up memory free(h_A); free(h_B); free(h_C); #if CHECK_RESULT == 1 free(reference); #endif cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); //cudaThreadExit(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = ((float)rand()) / (float)RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height) { int i, j, k; int error_count = 0; for (j = 0; j<height; j++) { for (i = 0; i<width; i++) { k = j*width + i; if (fabs(data1[k] - data2[k]) > 0.1) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f \n", i, j, data1[k], data2[k]); error_count++; if(error_count>2) return; } } } printf("Total Errors = %d \n", error_count); }
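The kernels exercised by runTest live in the *.cuh headers included at the top and are not reproduced in this listing. As a reference point, a minimal sketch of a naive kernel with the same launch signature (C = A * B, widths WA and WB, one thread per output element); the actual matrixMul_naive may differ:

// Hypothetical naive kernel matching the call matrixMul_naive<<<grid, threads>>>(d_C, d_A, d_B, WA, WB),
// with grid = (WC / BLOCK_SIZE, HC / BLOCK_SIZE) and a BLOCK_SIZE x BLOCK_SIZE block,
// so every thread owns exactly one element of C and no bounds check is needed.
__global__ void matrixMul_naive_sketch(float *C, float *A, float *B, int wA, int wB) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;     // row of C (and of A)
    int col = blockIdx.x * blockDim.x + threadIdx.x;     // column of C (and of B)
    float sum = 0.0f;
    for (int k = 0; k < wA; ++k)                         // dot product of A's row and B's column
        sum += A[row * wA + k] * B[k * wB + col];
    C[row * wB + col] = sum;
}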
c01a1945fd2eb3beabb5e0d13453bb9b42a7cba0.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/matrix/matvecmul.h" #include "cunumeric/matrix/matvecmul_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::FLOAT32> { void operator()(size_t m, size_t n, float* lhs, const float* mat, const float* vec, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const float alpha = 1.0; // lhs_overwritable being true means that the matvecmul tasks can overwrite the lhs const float beta = lhs_overwritable ? 0.0 : 1.0; auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; // XXX: There is a bug in older versions of cuBLAS that are triggered // by some degenerate matrix-vector multiplications. We simply use // matrix-matrix multiplication all the time unless we're on a recent // cuBLAS version int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( hipblasSgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(cublasSgemmEx(cublas_handle, trans, HIPBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, HIP_R_32F, mat_stride, vec, HIP_R_32F, transpose_mat ? m : n, &beta, lhs, HIP_R_32F, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::FLOAT64> { void operator()(size_t m, size_t n, double* lhs, const double* mat, const double* vec, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const double alpha = 1.0; const double beta = lhs_overwritable ? 0.0 : 1.0; auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; // FIXME: It's actually unknown that the cuBLAS bug for 32-bit floats reproduces for // 64-bit flots as well. We're simply being conservative here. int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( hipblasDgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(hipblasDgemm(cublas_handle, trans, HIPBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, mat_stride, vec, transpose_mat ? m : n, &beta, lhs, transpose_mat ? 
n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::FLOAT16> { void operator()(size_t m, size_t n, float* lhs, const __half* mat, const __half* vec, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const float alpha = 1.0; const float beta = lhs_overwritable ? 0.0 : 1.0; auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; // Use SgemmEx here since there is no half precision gemv yet CHECK_CUBLAS(cublasSgemmEx(cublas_handle, trans, HIPBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, HIP_R_16F, mat_stride, vec, HIP_R_16F, transpose_mat ? m : n, &beta, lhs, HIP_R_32F, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::COMPLEX64> { void operator()(size_t m, size_t n, complex<float>* lhs_, const complex<float>* mat_, const complex<float>* vec_, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { hipComplex* lhs = reinterpret_cast<hipComplex*>(lhs_); const hipComplex* mat = reinterpret_cast<const hipComplex*>(mat_); const hipComplex* vec = reinterpret_cast<const hipComplex*>(vec_); auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const hipComplex alpha = make_float2(1.0, 0.0); const hipComplex beta = make_float2(lhs_overwritable ? 0.0 : 1.0, 0.0); auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; // FIXME: It's actually unknown that the cuBLAS bug for 32-bit floats reproduces for // complex64 as well. We're simply being conservative here. int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( hipblasCgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(hipblasCgemmEx(cublas_handle, trans, HIPBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, HIP_C_32F, mat_stride, vec, HIP_C_32F, transpose_mat ? m : n, &beta, lhs, HIP_C_32F, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::COMPLEX128> { void operator()(size_t m, size_t n, complex<double>* lhs_, const complex<double>* mat_, const complex<double>* vec_, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { hipDoubleComplex* lhs = reinterpret_cast<hipDoubleComplex*>(lhs_); const hipDoubleComplex* mat = reinterpret_cast<const hipDoubleComplex*>(mat_); const hipDoubleComplex* vec = reinterpret_cast<const hipDoubleComplex*>(vec_); auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const hipDoubleComplex alpha = make_double2(1.0, 0.0); const hipDoubleComplex beta = make_double2(lhs_overwritable ? 0.0 : 1.0, 0.0); auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; // FIXME: It's actually unknown that the cuBLAS bug for 32-bit floats reproduces for // complex128 as well. We're simply being conservative here. int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( hipblasZgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(hipblasZgemm(cublas_handle, trans, HIPBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? 
m : n, &alpha, mat, mat_stride, vec, transpose_mat ? m : n, &beta, lhs, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; /*static*/ void MatVecMulTask::gpu_variant(TaskContext& context) { matvecmul_template<VariantKind::GPU>(context); } } // namespace cunumeric
c01a1945fd2eb3beabb5e0d13453bb9b42a7cba0.cu
/* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/matrix/matvecmul.h" #include "cunumeric/matrix/matvecmul_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::FLOAT32> { void operator()(size_t m, size_t n, float* lhs, const float* mat, const float* vec, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const float alpha = 1.0; // lhs_overwritable being true means that the matvecmul tasks can overwrite the lhs const float beta = lhs_overwritable ? 0.0 : 1.0; auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; // XXX: There is a bug in older versions of cuBLAS that are triggered // by some degenerate matrix-vector multiplications. We simply use // matrix-matrix multiplication all the time unless we're on a recent // cuBLAS version int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( cublasSgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(cublasSgemmEx(cublas_handle, trans, CUBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, CUDA_R_32F, mat_stride, vec, CUDA_R_32F, transpose_mat ? m : n, &beta, lhs, CUDA_R_32F, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::FLOAT64> { void operator()(size_t m, size_t n, double* lhs, const double* mat, const double* vec, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const double alpha = 1.0; const double beta = lhs_overwritable ? 0.0 : 1.0; auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; // FIXME: It's actually unknown that the cuBLAS bug for 32-bit floats reproduces for // 64-bit flots as well. We're simply being conservative here. int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( cublasDgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(cublasDgemm(cublas_handle, trans, CUBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, mat_stride, vec, transpose_mat ? m : n, &beta, lhs, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::FLOAT16> { void operator()(size_t m, size_t n, float* lhs, const __half* mat, const __half* vec, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const float alpha = 1.0; const float beta = lhs_overwritable ? 
0.0 : 1.0; auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; // Use SgemmEx here since there is no half precision gemv yet CHECK_CUBLAS(cublasSgemmEx(cublas_handle, trans, CUBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, CUDA_R_16F, mat_stride, vec, CUDA_R_16F, transpose_mat ? m : n, &beta, lhs, CUDA_R_32F, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::COMPLEX64> { void operator()(size_t m, size_t n, complex<float>* lhs_, const complex<float>* mat_, const complex<float>* vec_, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { cuComplex* lhs = reinterpret_cast<cuComplex*>(lhs_); const cuComplex* mat = reinterpret_cast<const cuComplex*>(mat_); const cuComplex* vec = reinterpret_cast<const cuComplex*>(vec_); auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const cuComplex alpha = make_float2(1.0, 0.0); const cuComplex beta = make_float2(lhs_overwritable ? 0.0 : 1.0, 0.0); auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; // FIXME: It's actually unknown that the cuBLAS bug for 32-bit floats reproduces for // complex64 as well. We're simply being conservative here. int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( cublasCgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(cublasCgemmEx(cublas_handle, trans, CUBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, CUDA_C_32F, mat_stride, vec, CUDA_C_32F, transpose_mat ? m : n, &beta, lhs, CUDA_C_32F, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, Type::Code::COMPLEX128> { void operator()(size_t m, size_t n, complex<double>* lhs_, const complex<double>* mat_, const complex<double>* vec_, size_t mat_stride, bool transpose_mat, bool lhs_overwritable) { cuDoubleComplex* lhs = reinterpret_cast<cuDoubleComplex*>(lhs_); const cuDoubleComplex* mat = reinterpret_cast<const cuDoubleComplex*>(mat_); const cuDoubleComplex* vec = reinterpret_cast<const cuDoubleComplex*>(vec_); auto cublas_handle = get_cublas(); auto task_stream = get_cached_stream(); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const cuDoubleComplex alpha = make_double2(1.0, 0.0); const cuDoubleComplex beta = make_double2(lhs_overwritable ? 0.0 : 1.0, 0.0); auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; // FIXME: It's actually unknown that the cuBLAS bug for 32-bit floats reproduces for // complex128 as well. We're simply being conservative here. int32_t version; CHECK_CUBLAS(cublasGetVersion(cublas_handle, &version)); if (version >= 11700) CHECK_CUBLAS( cublasZgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); else CHECK_CUBLAS(cublasZgemm(cublas_handle, trans, CUBLAS_OP_N, transpose_mat ? n : m, 1, transpose_mat ? m : n, &alpha, mat, mat_stride, vec, transpose_mat ? m : n, &beta, lhs, transpose_mat ? n : m)); CHECK_CUDA_STREAM(task_stream); } }; /*static*/ void MatVecMulTask::gpu_variant(TaskContext& context) { matvecmul_template<VariantKind::GPU>(context); } } // namespace cunumeric
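The swapped (n, m) arguments and the transpose flag above reflect the row-major-to-column-major mapping that cuBLAS requires. A small sketch of that mapping in isolation for the FLOAT32 case, assuming a row-major m x n matrix with leading dimension n and omitting handle creation and error checking:

#include <cublas_v2.h>

// y = mat * vec for a row-major m x n matrix: viewed column-major with lda = n
// it is the n x m transpose, so CUBLAS_OP_T with dimensions (n, m) yields the
// untransposed row-major product, exactly as the FLOAT32 branch above does.
void rowmajor_matvec(cublasHandle_t handle, int m, int n,
                     const float *d_mat, const float *d_vec, float *d_lhs) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemv(handle, CUBLAS_OP_T, n, m, &alpha, d_mat, n, d_vec, 1, &beta, d_lhs, 1);
}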
8ad5dbcf5251d9e7663e0ba0b9d9bdad26a768e1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <sys/types.h> #include <sys/time.h> #include <hip/hip_runtime.h> #define BW 16 // Block Width #define BH 32 // Block Height #define COUNT 0 // Kernel Function handles first nested for loop __global__ void kernelBlur(int *d_Rnew, int *d_Gnew, int *d_Bnew, int *d_R, int *d_G, int *d_B, int rowsize, int colsize) { // Set-up int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; // Run Some Calculations if (col < colsize && row < rowsize) { if (row != 0 && row != (rowsize-1) && col != 0 && col != (colsize-1)) { d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)]+d_R[row * colsize + (col - 1)])/4; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)]+d_G[row * colsize + (col - 1)])/4; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)]+d_B[row * colsize + (col - 1)])/4; } else if (row == 0 && col != 0 && col != (colsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[row * colsize + (col + 1)]+d_R[row * colsize + (col - 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[row * colsize + (col + 1)]+d_G[row * colsize + (col - 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[row * colsize + (col + 1)]+d_B[row * colsize + (col - 1)])/3; } else if (row == (rowsize-1) && col != 0 && col != (colsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)]+d_R[row * colsize + (col - 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)]+d_G[row * colsize + (col - 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)]+d_B[row * colsize + (col - 1)])/3; } else if (col == 0 && row != 0 && row != (rowsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)])/3; } else if (col == (colsize-1) && row != 0 && row != (rowsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)])/3; } else if (row==0 &&col==0){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col + 1)]+d_R[(row + 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col + 1)]+d_G[(row + 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col + 1)]+d_B[(row + 1) * colsize + col])/2; } else if (row==0 &&col==(colsize-1)){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col - 1)]+d_R[(row + 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col - 1)]+d_G[(row + 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col - 1)]+d_B[(row + 1) * colsize 
+ col])/2; } else if (row==(rowsize-1) &&col==0){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col + 1)]+d_R[(row - 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col + 1)]+d_G[(row - 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col + 1)]+d_B[(row - 1) * colsize + col])/2; } else if (row==(rowsize-1) &&col==(colsize-1)){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col - 1)]+d_R[(row - 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col - 1)]+d_G[(row - 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col - 1)]+d_B[(row - 1) * colsize + col])/2; } } } // Kernel Function handles second nested for loop updates RGB values to new calculated values __global__ void kernelCopy(int *d_Rnew, int *d_Gnew, int *d_Bnew, int *d_R, int *d_G, int *d_B, int rowsize, int colsize) { // Set-up int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if (col < colsize && row < rowsize) { d_R[row * colsize + col] = d_Rnew[row * colsize + col]; d_G[row * colsize + col] = d_Gnew[row * colsize + col]; d_B[row * colsize + col] = d_Bnew[row * colsize + col]; } } void performBlurs(int *h_R, int *h_G, int *h_B, int *h_Rnew, int *h_Gnew, int *h_Bnew, int rowsize, int colsize, int nblurs) { // Assign Memory on GPU // Step 1 Assign Memory on GPU int k; int sizei = sizeof(int)*rowsize*colsize; int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew; struct timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); hipMalloc((void **)&d_R,sizei); hipMalloc((void **)&d_G,sizei); hipMalloc((void **)&d_B,sizei); hipMalloc((void **)&d_Rnew,sizei); hipMalloc((void **)&d_Gnew,sizei); hipMalloc((void **)&d_Bnew,sizei); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Assigning Memory to GPU > %.6lf seconds elapsed\n", t2-t1); // Transfer to Device gettimeofday(&tim, NULL); t1=tim.tv_sec+(tim.tv_usec/1000000.0); hipMemcpy(d_R, h_R, sizei, hipMemcpyHostToDevice); hipMemcpy(d_G, h_G, sizei, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sizei, hipMemcpyHostToDevice); hipMemcpy(d_Rnew, h_Rnew, sizei, hipMemcpyHostToDevice); hipMemcpy(d_Gnew, h_Gnew, sizei, hipMemcpyHostToDevice); hipMemcpy(d_Bnew, h_Bnew, sizei, hipMemcpyHostToDevice); t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Transferring from host to device memory > %.6lf seconds elapsed\n", t2-t1); // Set up Blocks dim3 dimGrid(ceil(colsize/(float)BW), ceil(rowsize/(float)BH), 1); dim3 dimBlock(BW,BH); nblurs = 10; // Modify as Needed gettimeofday(&tim, NULL); t1=tim.tv_sec+(tim.tv_usec/1000000.0); for (k = 0; k < nblurs; ++k) { hipLaunchKernelGGL(( kernelBlur), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Rnew, d_Gnew, d_Bnew, d_R, d_G, d_B, rowsize, colsize); hipLaunchKernelGGL(( kernelCopy), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Rnew, d_Gnew, d_Bnew, d_R, d_G, d_B, rowsize, colsize); } t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Blurring Operation > %.6lf seconds elapsed\n", t2-t1); // Step 4 output copied from GPU to Host get the RGB values hipMemcpy(h_R, d_R, sizei, hipMemcpyDeviceToHost); hipMemcpy(h_G, d_G, sizei, hipMemcpyDeviceToHost); hipMemcpy(h_B, d_B, sizei, hipMemcpyDeviceToHost); // Step 5 Free Memory hipFree(d_R); hipFree(d_G); hipFree(d_B); hipFree(d_Rnew); hipFree(d_Gnew); hipFree(d_Bnew); } int main (int argc, const char * argv[]) { // Assignment of initial Variables static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12; static char 
str[200], lines[5][200]; FILE *fp, *fout; int nlines = 0; unsigned int h1, h2, h3; char *sptr; // Define Host Arrays int *h_R, *h_G, *h_B; int *h_Rnew, *h_Gnew, *h_Bnew; int size = sizeof(int) * rowsize * colsize; h_R = (int *)malloc(size); h_G = (int *)malloc(size); h_B = (int *)malloc(size); h_Rnew = (int *)malloc(size); h_Gnew = (int *)malloc(size); h_Bnew = (int *)malloc(size); // Allocate Overall Size of ROw int row = 0, col = 0, nblurs = 0, lineno=0, k; // Read input file struct timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); fp = fopen("sample.ps", "r"); while(! feof(fp)) { fscanf(fp, "\n%[^\n]", str); if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);} else{ for (sptr=&str[0];*sptr != '\0';sptr+=6){ sscanf(sptr,"%2x",&h1); sscanf(sptr+2,"%2x",&h2); sscanf(sptr+4,"%2x",&h3); if (col==colsize){ col = 0; row++; } if (row < rowsize) { h_R[row * colsize + col] = h1; h_G[row * colsize + col] = h2; h_B[row * colsize + col] = h3; } col++; } } } fclose(fp); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Reading Input File > %.6lf seconds elapsed\n", t2-t1); // Run Code performBlurs(h_R, h_G, h_B, h_Rnew, h_Gnew, h_Bnew, rowsize, colsize, nblurs); gettimeofday(&tim, NULL); t1=tim.tv_sec+(tim.tv_usec/1000000.0); fout= fopen("sampleBlurCU.ps", "w"); for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]); fprintf(fout,"\n"); for(row=0;row<rowsize;row++){ for (col=0;col<colsize;col++){ fprintf(fout,"%02x%02x%02x",h_R[row * colsize + col],h_G[row * colsize + col],h_B[row * colsize + col]); lineno++; if (lineno==linelen){ fprintf(fout,"\n"); lineno = 0; } } } gettimeofday(&tim, NULL); t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Outputting File > %.6lf seconds elapsed\n", t2-t1); fclose(fout); return 0; }
8ad5dbcf5251d9e7663e0ba0b9d9bdad26a768e1.cu
#include <stdio.h> #include <string.h> #include <sys/types.h> #include <sys/time.h> #include <cuda.h> #define BW 16 // Block Width #define BH 32 // Block Height #define COUNT 0 // Kernel Function handles first nested for loop __global__ void kernelBlur(int *d_Rnew, int *d_Gnew, int *d_Bnew, int *d_R, int *d_G, int *d_B, int rowsize, int colsize) { // Set-up int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; // Run Some Calculations if (col < colsize && row < rowsize) { if (row != 0 && row != (rowsize-1) && col != 0 && col != (colsize-1)) { d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)]+d_R[row * colsize + (col - 1)])/4; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)]+d_G[row * colsize + (col - 1)])/4; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)]+d_B[row * colsize + (col - 1)])/4; } else if (row == 0 && col != 0 && col != (colsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[row * colsize + (col + 1)]+d_R[row * colsize + (col - 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[row * colsize + (col + 1)]+d_G[row * colsize + (col - 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[row * colsize + (col + 1)]+d_B[row * colsize + (col - 1)])/3; } else if (row == (rowsize-1) && col != 0 && col != (colsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)]+d_R[row * colsize + (col - 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)]+d_G[row * colsize + (col - 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)]+d_B[row * colsize + (col - 1)])/3; } else if (col == 0 && row != 0 && row != (rowsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)])/3; } else if (col == (colsize-1) && row != 0 && row != (rowsize-1)){ d_Rnew[row * colsize + col] = (d_R[(row + 1) * colsize + col]+d_R[(row - 1) * colsize + col]+d_R[row * colsize + (col + 1)])/3; d_Gnew[row * colsize + col] = (d_G[(row + 1) * colsize + col]+d_G[(row - 1) * colsize + col]+d_G[row * colsize + (col + 1)])/3; d_Bnew[row * colsize + col] = (d_B[(row + 1) * colsize + col]+d_B[(row - 1) * colsize + col]+d_B[row * colsize + (col + 1)])/3; } else if (row==0 &&col==0){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col + 1)]+d_R[(row + 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col + 1)]+d_G[(row + 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col + 1)]+d_B[(row + 1) * colsize + col])/2; } else if (row==0 &&col==(colsize-1)){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col - 1)]+d_R[(row + 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col - 1)]+d_G[(row + 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col - 1)]+d_B[(row + 1) * colsize + col])/2; } else if (row==(rowsize-1) &&col==0){ d_Rnew[row * 
colsize + col] = (d_R[row * colsize + (col + 1)]+d_R[(row - 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col + 1)]+d_G[(row - 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col + 1)]+d_B[(row - 1) * colsize + col])/2; } else if (row==(rowsize-1) &&col==(colsize-1)){ d_Rnew[row * colsize + col] = (d_R[row * colsize + (col - 1)]+d_R[(row - 1) * colsize + col])/2; d_Gnew[row * colsize + col] = (d_G[row * colsize + (col - 1)]+d_G[(row - 1) * colsize + col])/2; d_Bnew[row * colsize + col] = (d_B[row * colsize + (col - 1)]+d_B[(row - 1) * colsize + col])/2; } } } // Kernel Function handles second nested for loop updates RGB values to new calculated values __global__ void kernelCopy(int *d_Rnew, int *d_Gnew, int *d_Bnew, int *d_R, int *d_G, int *d_B, int rowsize, int colsize) { // Set-up int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if (col < colsize && row < rowsize) { d_R[row * colsize + col] = d_Rnew[row * colsize + col]; d_G[row * colsize + col] = d_Gnew[row * colsize + col]; d_B[row * colsize + col] = d_Bnew[row * colsize + col]; } } void performBlurs(int *h_R, int *h_G, int *h_B, int *h_Rnew, int *h_Gnew, int *h_Bnew, int rowsize, int colsize, int nblurs) { // Assign Memory on GPU // Step 1 Assign Memory on GPU int k; int sizei = sizeof(int)*rowsize*colsize; int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew; struct timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); cudaMalloc((void **)&d_R,sizei); cudaMalloc((void **)&d_G,sizei); cudaMalloc((void **)&d_B,sizei); cudaMalloc((void **)&d_Rnew,sizei); cudaMalloc((void **)&d_Gnew,sizei); cudaMalloc((void **)&d_Bnew,sizei); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Assigning Memory to GPU > %.6lf seconds elapsed\n", t2-t1); // Transfer to Device gettimeofday(&tim, NULL); t1=tim.tv_sec+(tim.tv_usec/1000000.0); cudaMemcpy(d_R, h_R, sizei, cudaMemcpyHostToDevice); cudaMemcpy(d_G, h_G, sizei, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sizei, cudaMemcpyHostToDevice); cudaMemcpy(d_Rnew, h_Rnew, sizei, cudaMemcpyHostToDevice); cudaMemcpy(d_Gnew, h_Gnew, sizei, cudaMemcpyHostToDevice); cudaMemcpy(d_Bnew, h_Bnew, sizei, cudaMemcpyHostToDevice); t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Transferring from host to device memory > %.6lf seconds elapsed\n", t2-t1); // Set up Blocks dim3 dimGrid(ceil(colsize/(float)BW), ceil(rowsize/(float)BH), 1); dim3 dimBlock(BW,BH); nblurs = 10; // Modify as Needed gettimeofday(&tim, NULL); t1=tim.tv_sec+(tim.tv_usec/1000000.0); for (k = 0; k < nblurs; ++k) { kernelBlur<<<dimGrid, dimBlock>>>(d_Rnew, d_Gnew, d_Bnew, d_R, d_G, d_B, rowsize, colsize); kernelCopy<<<dimGrid, dimBlock>>>(d_Rnew, d_Gnew, d_Bnew, d_R, d_G, d_B, rowsize, colsize); } t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Blurring Operation > %.6lf seconds elapsed\n", t2-t1); // Step 4 output copied from GPU to Host get the RGB values cudaMemcpy(h_R, d_R, sizei, cudaMemcpyDeviceToHost); cudaMemcpy(h_G, d_G, sizei, cudaMemcpyDeviceToHost); cudaMemcpy(h_B, d_B, sizei, cudaMemcpyDeviceToHost); // Step 5 Free Memory cudaFree(d_R); cudaFree(d_G); cudaFree(d_B); cudaFree(d_Rnew); cudaFree(d_Gnew); cudaFree(d_Bnew); } int main (int argc, const char * argv[]) { // Assignment of initial Variables static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12; static char str[200], lines[5][200]; FILE *fp, *fout; int nlines = 0; unsigned int h1, h2, h3; char *sptr; // Define 
Host Arrays int *h_R, *h_G, *h_B; int *h_Rnew, *h_Gnew, *h_Bnew; int size = sizeof(int) * rowsize * colsize; h_R = (int *)malloc(size); h_G = (int *)malloc(size); h_B = (int *)malloc(size); h_Rnew = (int *)malloc(size); h_Gnew = (int *)malloc(size); h_Bnew = (int *)malloc(size); // Allocate Overall Size of ROw int row = 0, col = 0, nblurs = 0, lineno=0, k; // Read input file struct timeval tim; gettimeofday(&tim, NULL); double t1=tim.tv_sec+(tim.tv_usec/1000000.0); fp = fopen("sample.ps", "r"); while(! feof(fp)) { fscanf(fp, "\n%[^\n]", str); if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);} else{ for (sptr=&str[0];*sptr != '\0';sptr+=6){ sscanf(sptr,"%2x",&h1); sscanf(sptr+2,"%2x",&h2); sscanf(sptr+4,"%2x",&h3); if (col==colsize){ col = 0; row++; } if (row < rowsize) { h_R[row * colsize + col] = h1; h_G[row * colsize + col] = h2; h_B[row * colsize + col] = h3; } col++; } } } fclose(fp); gettimeofday(&tim, NULL); double t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Reading Input File > %.6lf seconds elapsed\n", t2-t1); // Run Code performBlurs(h_R, h_G, h_B, h_Rnew, h_Gnew, h_Bnew, rowsize, colsize, nblurs); gettimeofday(&tim, NULL); t1=tim.tv_sec+(tim.tv_usec/1000000.0); fout= fopen("sampleBlurCU.ps", "w"); for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]); fprintf(fout,"\n"); for(row=0;row<rowsize;row++){ for (col=0;col<colsize;col++){ fprintf(fout,"%02x%02x%02x",h_R[row * colsize + col],h_G[row * colsize + col],h_B[row * colsize + col]); lineno++; if (lineno==linelen){ fprintf(fout,"\n"); lineno = 0; } } } gettimeofday(&tim, NULL); t2=tim.tv_sec+(tim.tv_usec/1000000.0); printf("Outputting File > %.6lf seconds elapsed\n", t2-t1); fclose(fout); return 0; }
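The nine if/else branches in kernelBlur special-case corners and edges so that only in-range 4-neighbours are averaged. For a single colour channel, the same rule can be written as one bounds-checked accumulation; a minimal sketch with a hypothetical kernel name:

__global__ void kernelBlurOneChannel(int *out, const int *in, int rowsize, int colsize) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= rowsize || col >= colsize) return;
    int sum = 0, cnt = 0;                                // accumulate only neighbours that exist
    if (row > 0)           { sum += in[(row - 1) * colsize + col]; ++cnt; }
    if (row < rowsize - 1) { sum += in[(row + 1) * colsize + col]; ++cnt; }
    if (col > 0)           { sum += in[row * colsize + (col - 1)]; ++cnt; }
    if (col < colsize - 1) { sum += in[row * colsize + (col + 1)]; ++cnt; }
    out[row * colsize + col] = cnt ? sum / cnt : in[row * colsize + col];  // 2-, 3- or 4-neighbour average
}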
c510953f306d3526f7fff4072a1d9a608115e498.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

#define MAX_HASH 10
#define HASH_KEY(key) ((key) % MAX_HASH)

using namespace std;

typedef struct Node {
    int id;
    Node *hashNext;
} Node;

// Device-side table used by the kernels; the host-side functions below keep
// their own table, because a __device__ array cannot be touched from host code.
__device__ Node *hashTable[MAX_HASH];
Node *h_hashTable[MAX_HASH];

__global__ void cudaAddHashData(int key, Node *node) {
    int hash_key = HASH_KEY(key);
    if (hashTable[hash_key] == NULL)
        hashTable[hash_key] = node;
    else {
        node->hashNext = hashTable[hash_key];
        hashTable[hash_key] = node;
    }
}

__global__ void cudaDelHashData(int id) {
    int hash_key = HASH_KEY(id);
    if (hashTable[hash_key] == NULL) return;
    Node *delNode = NULL;
    if (hashTable[hash_key]->id == id) {
        delNode = hashTable[hash_key];
        hashTable[hash_key] = hashTable[hash_key]->hashNext;
    } else {
        Node *node = hashTable[hash_key];
        Node *next = node->hashNext;
        while (next) {
            if (next->id == id) {
                node->hashNext = next->hashNext;
                delNode = next;
                break;
            }
            node = next;
            next = node->hashNext;
        }
    }
    // delNode was allocated with hipMalloc on the host, so it cannot be free()d
    // in device code; it is only unlinked here and must be released by the host.
    (void) delNode;
}

__device__ Node *cudaFindHashData(int id) {
    int hash_key = HASH_KEY(id);
    if (hashTable[hash_key] == NULL) return NULL;
    if (hashTable[hash_key]->id == id)
        return hashTable[hash_key];
    else {
        Node *node = hashTable[hash_key];
        while (node->hashNext) {
            if (node->hashNext->id == id) return node->hashNext;
            node = node->hashNext;
        }
    }
    return NULL;
}

__global__ void cudaPrintAllHashData() {
    printf("Print all hash data\n");
    for (int i = 0; i < MAX_HASH; i++) {
        printf("idx:%d\n", i);
        if (hashTable[i] != NULL) {
            Node *node = hashTable[i];
            while (node->hashNext) {
                printf("%d ", node->id);
                node = node->hashNext;
            }
            printf("%d\n", node->id);
        }
    }
    printf("\n\n");
}

void cudaTestHash() {
    int saveidx[101] = {0, };
    for (int i = 0; i < 100; i++) {
        Node *node = (Node *) malloc(sizeof(Node));
        node->id = rand() % 1000;
        node->hashNext = NULL;
        Node *node_gpu;
        hipMalloc((void **) &node_gpu, sizeof(Node));
        hipMemcpy(node_gpu, node, sizeof(Node), hipMemcpyHostToDevice);
        hipLaunchKernelGGL(cudaAddHashData, dim3(1), dim3(1), 0, 0, node->id, node_gpu);
        saveidx[i] = node->id;
        free(node);
    }
    hipLaunchKernelGGL(cudaPrintAllHashData, dim3(1), dim3(1), 0, 0);
    for (int i = 0; i < 50; i++)
        hipLaunchKernelGGL(cudaDelHashData, dim3(1), dim3(1), 0, 0, saveidx[i]);
    hipLaunchKernelGGL(cudaPrintAllHashData, dim3(1), dim3(1), 0, 0);
    for (int i = 50; i < 100; i++)
        hipLaunchKernelGGL(cudaDelHashData, dim3(1), dim3(1), 0, 0, saveidx[i]);
    hipLaunchKernelGGL(cudaPrintAllHashData, dim3(1), dim3(1), 0, 0);
    hipDeviceSynchronize();
}

void addHashData(int key, Node *node) {
    int hash_key = HASH_KEY(key);
    if (h_hashTable[hash_key] == NULL)
        h_hashTable[hash_key] = node;
    else {
        node->hashNext = h_hashTable[hash_key];
        h_hashTable[hash_key] = node;
    }
}

void delHashData(int id) {
    int hash_key = HASH_KEY(id);
    if (h_hashTable[hash_key] == NULL) return;
    Node *delNode = NULL;
    if (h_hashTable[hash_key]->id == id) {
        delNode = h_hashTable[hash_key];
        h_hashTable[hash_key] = h_hashTable[hash_key]->hashNext;
    } else {
        Node *node = h_hashTable[hash_key];
        Node *next = node->hashNext;
        while (next) {
            if (next->id == id) {
                node->hashNext = next->hashNext;
                delNode = next;
                break;
            }
            node = next;
            next = node->hashNext;
        }
    }
    free(delNode);
}

Node *findHashData(int id) {
    int hash_key = HASH_KEY(id);
    if (h_hashTable[hash_key] == NULL) return NULL;
    if (h_hashTable[hash_key]->id == id)
        return h_hashTable[hash_key];
    else {
        Node *node = h_hashTable[hash_key];
        while (node->hashNext) {
            if (node->hashNext->id == id) return node->hashNext;
            node = node->hashNext;
        }
    }
    return NULL;
}

void printAllHashData() {
    cout << "Print all hash data" << endl;
    for (int i = 0; i < MAX_HASH; i++) {
        cout << "idx:" << i << endl;
        if (h_hashTable[i] != NULL) {
            Node *node = h_hashTable[i];
            while (node->hashNext) {
                cout << node->id << " ";
                node = node->hashNext;
            }
            cout << node->id << endl;
        }
    }
    cout << endl << endl;
}

void testHash() {
    int saveidx[101] = {0, };
    for (int i = 0; i < 100; i++) {
        Node *node = (Node *) malloc(sizeof(Node));
        node->id = rand() % 1000;
        node->hashNext = NULL;
        addHashData(node->id, node);
        saveidx[i] = node->id;
    }
    printAllHashData();
    for (int i = 0; i < 50; i++) delHashData(saveidx[i]);
    printAllHashData();
    for (int i = 50; i < 100; i++) delHashData(saveidx[i]);
    printAllHashData();
}

__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
    printf("bid: %d, tid: %d, value= %f\n", blockIdx.x, threadIdx.x, C[i]);
}

void initialData(float *input, int size) {
    time_t t;
    srand((unsigned) time(&t));
    for (int i = 0; i < size; i++)
        input[i] = (float) (rand() & 0xFF) / 10.0f;
}

int main(int argc, char **argv) {
    testHash();
    int dev = 0;
    hipSetDevice(dev);
    int nElem = 32;
    printf("Vector size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *h_C;
    float *d_A, *d_B, *d_C;
    h_A = (float *) malloc(nBytes);
    h_B = (float *) malloc(nBytes);
    h_C = (float *) malloc(nBytes);
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    hipMalloc((float **) &d_A, nBytes);
    hipMalloc((float **) &d_B, nBytes);
    hipMalloc((float **) &d_C, nBytes);
    hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
    dim3 block(nElem);
    dim3 grid(nElem / block.x);
    hipLaunchKernelGGL(sumArraysOnGPU, dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C);
    printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
    hipMemcpy(h_C, d_C, nBytes, hipMemcpyDeviceToHost);
    hipFree(d_A); hipFree(d_B); hipFree(d_C);
    free(h_A); free(h_B); free(h_C);
    return 0;
}
c510953f306d3526f7fff4072a1d9a608115e498.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> #include <iostream> #define MAX_HASH 10 #define HASH_KEY(key) key%MAX_HASH using namespace std; typedef struct Node{ int id; Node* hashNext; } Node; __device__ Node *hashTable[MAX_HASH]; __global__ void cudaAddHashData(int key, Node *node){ int hash_key = HASH_KEY(key); if(hashTable[hash_key] == NULL) hashTable[hash_key] = node; else { node->hashNext = hashTable[hash_key]; hashTable[hash_key] = node; } } __global__ void cudaDelHashData(int id){ int hash_key = HASH_KEY(id); if(hashTable[hash_key] == NULL) return; Node *delNode = NULL; if(hashTable[hash_key]->id == id) { delNode = hashTable[hash_key]; hashTable[hash_key] = hashTable[hash_key]->hashNext; } else { Node *node = hashTable[hash_key]; Node *next = node->hashNext; while(next) { if(next->id == id) { node->hashNext = next->hashNext; delNode = next; break; } node = next; next = node->hashNext; } } /* delNode was allocated with cudaMalloc on the host, so it is only unlinked here; device-side free() may not release it */ (void)delNode; } /* kernels cannot return values, so the lookup is a __device__ helper */ __device__ Node *cudaFindHashData(int id){ int hash_key = HASH_KEY(id); if(hashTable[hash_key] == NULL) return NULL; if(hashTable[hash_key]->id == id) return hashTable[hash_key]; else { Node *node = hashTable[hash_key]; while(node->hashNext) { if(node->hashNext->id == id) return node->hashNext; node = node->hashNext; } } return NULL; } __global__ void cudaPrintAllHashData(){ printf("Print all hash data\n"); for(int i = 0; i < MAX_HASH; i++) { printf("idx:%d\n", i); if(hashTable[i] != NULL) { Node *node = hashTable[i]; while(node->hashNext) { printf("%d ", node->id); node = node->hashNext; } printf("%d\n", node->id); } } printf("\n\n"); } void cudaTestHash(){ int saveidx[101] = {0, }; for(int i = 0; i < 100; i++) { Node *node = (Node *) malloc(sizeof(Node)); node->id = rand() % 1000; node->hashNext = NULL; Node *node_gpu; cudaMalloc((void **) &node_gpu, sizeof(Node)); cudaMemcpy(node_gpu, node, sizeof(Node), cudaMemcpyHostToDevice); cudaAddHashData<<<1, 1>>>(node->id, node_gpu); saveidx[i] = node->id; free(node); } cudaPrintAllHashData<<<1, 1>>>(); cudaDeviceSynchronize(); for(int i = 0; i < 50; i++) cudaDelHashData<<<1, 1>>>(saveidx[i]); cudaPrintAllHashData<<<1, 1>>>(); cudaDeviceSynchronize(); for(int i = 50; i < 100; i++) cudaDelHashData<<<1, 1>>>(saveidx[i]); cudaPrintAllHashData<<<1, 1>>>(); cudaDeviceSynchronize(); } void addHashData(int key, Node *node){ int hash_key = HASH_KEY(key); if(hashTable[hash_key] == NULL) hashTable[hash_key] = node; else { node->hashNext = hashTable[hash_key]; hashTable[hash_key] = node; } } void delHashData(int id){ int hash_key = HASH_KEY(id); if(hashTable[hash_key] == NULL) return; Node *delNode = NULL; if(hashTable[hash_key]->id == id) { delNode = hashTable[hash_key]; hashTable[hash_key] = hashTable[hash_key]->hashNext; } else { Node *node = hashTable[hash_key]; Node *next = node->hashNext; while(next) { if(next->id == id) { node->hashNext = next->hashNext; delNode = next; break; } node = next; next = node->hashNext; } } free(delNode); } Node *findHashData(int id){ int hash_key = HASH_KEY(id); if(hashTable[hash_key] == NULL) return NULL; if(hashTable[hash_key]->id == id) return hashTable[hash_key]; else { Node *node = hashTable[hash_key]; while(node->hashNext) { if(node->hashNext->id == id) return node->hashNext; node = node->hashNext; } } return NULL; } void printAllHashData(){ cout << "Print all hash data" << endl; for(int i = 0; i < MAX_HASH; i++) { cout << "idx:" << i << endl; if(hashTable[i] != NULL) { Node *node = hashTable[i]; while(node->hashNext) { cout << node->id << " "; node = node->hashNext; } cout << node->id << endl; } } cout << endl << endl; } void testHash(){ int saveidx[101] = {0, }; for(int i = 0; i < 100; i++) { Node *node = (Node *) malloc(sizeof(Node)); node->id = rand() % 1000; node->hashNext = NULL; addHashData(node->id, node); saveidx[i] = node->id; } printAllHashData(); for(int i = 0; i < 50; i++) 
delHashData(saveidx[i]); printAllHashData(); for(int i = 50; i < 100; i++) delHashData(saveidx[i]); printAllHashData(); } __global__ void sumArraysOnGPU(float *A, float *B, float *C){ int i = threadIdx.x; C[i] = A[i] + B[i]; printf("bid: %d, tid: %d, value= %f\n", blockIdx.x, threadIdx.x, C[i]); } void initialData(float *input, int size){ time_t t; srand((unsigned) time(&t)); for(int i = 0; i < size; i++) input[i] = (float) (rand() & 0xFF) / 10.0f; } int main(int argc, char** argv){ testHash(); int dev = 0; cudaSetDevice(dev); int nElem = 32; printf("Vector size %d\n", nElem); size_t nBytes = nElem * sizeof(float); float *h_A; float *h_B; float *h_C; float *d_A; float *d_B; float *d_C; h_A = (float *) malloc(nBytes); h_B = (float *) malloc(nBytes); h_C = (float *) malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); cudaMalloc((float **) &d_A, nBytes); cudaMalloc((float **) &d_B, nBytes); cudaMalloc((float **) &d_C, nBytes); cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); dim3 block(nElem); dim3 grid(nElem / block.x); sumArraysOnGPU <<<grid, block>>> (d_A, d_B, d_C); printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x); cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); return 0; }
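The add/delete kernels in both variants above are only safe when launched with a single thread (<<<1, 1>>>), as in cudaTestHash; with more than one thread touching a bucket they race on the list head. A lock-free insert would normally go through atomicCAS on the bucket pointer. The following is a sketch under that assumption; cudaAddHashDataAtomic is not part of the original file and simply reuses the Node, hashTable and HASH_KEY declarations above (64-bit pointers assumed).

__global__ void cudaAddHashDataAtomic(int key, Node *node){
    int hash_key = HASH_KEY(key);
    unsigned long long *slot = (unsigned long long *)&hashTable[hash_key];
    unsigned long long expected = *slot;
    for (;;) {
        node->hashNext = (Node *)expected;                               // link to the current head
        unsigned long long prev = atomicCAS(slot, expected, (unsigned long long)node);
        if (prev == expected) break;                                     // CAS succeeded, node is the new head
        expected = prev;                                                 // another thread won; retry against its head
    }
}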
798aac8727246fdb49975f2658126603a886a13a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling2dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { // x has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC) // z has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimIH; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimIH = isNCHW ? 
2 : 1; zLen = shape::length(zShapeInfo); rank = 4; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); coords[dimIH] /= factorH; coords[dimIH + 1] /= factorW; const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { hipLaunchKernelGGL(( upsampling2dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, factorH, factorW, isNCHW); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void ConvolutionUtils::upsampling2d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int factorH, const int factorW, const bool isNCHW) { PointersManager manager(block.launchContext(), "upsampling2d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), upsampling2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorH, factorW, isNCHW), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } } }
798aac8727246fdb49975f2658126603a886a13a.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void upsampling2dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { // x has shape [bS, iC, iH, iW] (NCHW) or [bS, iH, iW, iC] (NHWC) // z has shape [bS, iC, factorH*iH, factorW*iW ] (NCHW) or [bS, factorH*iH, factorW*iW, iC] (NHWC) const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, dimIH; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); dimIH = isNCHW ? 2 : 1; zLen = shape::length(zShapeInfo); rank = 4; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); coords[dimIH] /= factorH; coords[dimIH + 1] /= factorW; const auto xOffset = shape::getOffset(xShapeInfo, coords); z[zOffset] = x[xOffset]; } ////////////////////////////////////////////////////////////////////////// template <typename T> static void upsampling2dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int factorH, const int factorW, const bool isNCHW) { upsampling2dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, factorH, factorW, isNCHW); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void ConvolutionUtils::upsampling2d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int factorH, const int factorW, const bool isNCHW) { PointersManager manager(block.launchContext(), "upsampling2d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), upsampling2dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), factorH, factorW, isNCHW), 
FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } } }
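The shape comments at the top of upsampling2dCuda describe the whole algorithm: each output coordinate is mapped back to an input coordinate by integer division with the upsampling factor (coords[dimIH] /= factorH). For readers without the sd::/shape:: helpers, the same nearest-neighbour mapping for a dense NCHW tensor can be sketched as follows; upsample2d_nchw is an illustrative name, not part of the library.

__global__ void upsample2d_nchw(const float *x, float *z,
                                int bS, int iC, int iH, int iW,
                                int factorH, int factorW) {
    const int oH = iH * factorH, oW = iW * factorW;
    const long long zLen = (long long)bS * iC * oH * oW;
    const long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x;
    if (i >= zLen) return;
    // decompose the flat output index into (b, c, oh, ow)
    const int ow = (int)(i % oW);  long long t = i / oW;
    const int oh = (int)(t % oH);  t /= oH;
    const int c  = (int)(t % iC);  const int b = (int)(t / iC);
    // integer division maps each output pixel back to its source pixel
    const int ih = oh / factorH, iw = ow / factorW;
    z[i] = x[(((long long)b * iC + c) * iH + ih) * iW + iw];
}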
4662187e12015c650682e14db630273d1b2e78ef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions mixed zc -> ds @author Mark Gates */ #include "common_magma.h" #define PRECISION_z #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlat2c and zlaset. */ __global__ void zlag2c_kernel( int m, int n, const magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, double rmax ) { magmaDoubleComplex tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } } } /** Purpose ------- ZLAG2C_STREAM converts a double-complex matrix, A, to a single-complex matrix, SA. RMAX is the overflow for the single-complex arithmetic. ZLAG2C checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. This is the same as ZLAG2C, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. m >= 0. @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the m-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,m). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the m-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,m). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the COMPLEX overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlag2c_q( magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,m) ) *info = -4; else if ( ldsa < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } double rmax = (double)lapackf77_slamch("O"); dim3 threads( BLK_X, 1 ); dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y ); hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0 hipLaunchKernelGGL(( zlag2c_kernel), dim3(grid), dim3(threads), 0, queue , m, n, A, lda, SA, ldsa, rmax ); hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag } /** @see magmablas_zlag2c_q @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlag2c( magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t *info ) { magmablas_zlag2c_q( m, n, A, lda, SA, ldsa, info, magma_stream ); }
4662187e12015c650682e14db630273d1b2e78ef.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions mixed zc -> ds @author Mark Gates */ #include "common_magma.h" #define PRECISION_z #define BLK_X 64 #define BLK_Y 32 // TODO get rid of global variable! static __device__ int flag = 0; /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlat2c and zlaset. */ __global__ void zlag2c_kernel( int m, int n, const magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int ldsa, double rmax ) { magmaDoubleComplex tmp; double neg_rmax = - rmax; int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { tmp = A[j*lda]; if ( (MAGMA_Z_REAL(tmp) < neg_rmax) || (MAGMA_Z_REAL(tmp) > rmax) #if defined(PRECISION_z) || defined(PRECISION_c) || (MAGMA_Z_IMAG(tmp) < neg_rmax) || (MAGMA_Z_IMAG(tmp) > rmax) #endif ) { flag = 1; } SA[j*ldsa] = MAGMA_C_MAKE( MAGMA_Z_REAL(tmp), MAGMA_Z_IMAG(tmp) ); } } } } /** Purpose ------- ZLAG2C_STREAM converts a double-complex matrix, A, to a single-complex matrix, SA. RMAX is the overflow for the single-complex arithmetic. ZLAG2C checks that all the entries of A are between -RMAX and RMAX. If not, the conversion is aborted and a flag is raised. This is the same as ZLAG2C, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of lines of the matrix A. m >= 0. @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the m-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,m). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the m-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,m). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value - = 1: an entry of the matrix A is greater than the COMPLEX overflow threshold, in this case, the content of SA on exit is unspecified. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlag2c_q( magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t *info, magma_queue_t queue ) { *info = 0; if ( m < 0 ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,m) ) *info = -4; else if ( ldsa < max(1,m) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( m == 0 || n == 0 ) { return; } double rmax = (double)lapackf77_slamch("O"); dim3 threads( BLK_X, 1 ); dim3 grid( (m+BLK_X-1)/BLK_X, (n+BLK_Y-1)/BLK_Y ); cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0 zlag2c_kernel<<< grid, threads, 0, queue >>>( m, n, A, lda, SA, ldsa, rmax ); cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag } /** @see magmablas_zlag2c_q @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlag2c( magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t ldsa, magma_int_t *info ) { magmablas_zlag2c_q( m, n, A, lda, SA, ldsa, info, magma_stream ); }
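The two symbol copies around the kernel launch are the entire overflow-reporting mechanism here: *info is copied into the device-side flag (resetting it to 0), the kernel sets the flag when any entry exceeds rmax, and the flag is copied back into *info afterwards. Outside of MAGMA, the same pattern reduces to roughly the sketch below; overflow_flag, check_kernel and run_check are illustrative names, not MAGMA API.

#include <cuda_runtime.h>
#include <math.h>

__device__ int overflow_flag = 0;

__global__ void check_kernel(const double *a, int n, double rmax) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && fabs(a[i]) > rmax) overflow_flag = 1;         // benign race: every writer stores 1
}

int run_check(const double *d_a, int n, double rmax) {
    int flag = 0;
    cudaMemcpyToSymbol(overflow_flag, &flag, sizeof(flag));    // reset the device flag to 0
    check_kernel<<<(n + 255) / 256, 256>>>(d_a, n, rmax);
    cudaMemcpyFromSymbol(&flag, overflow_flag, sizeof(flag));  // read it back after the launch
    return flag;                                               // nonzero => some entry exceeded rmax
}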
8b27c40a2c84ada47463538b1679eee11322e460.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // modify from // https://github.com/NVIDIA/TensorRT/tree/master/plugin/batchedNMSPlugin #include <vector> #include "nms/kernel.h" #include "trt_plugin_helper.hpp" template <typename T_BBOX, typename T_SCORE, bool rotated, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherNMSOutputs_kernel(const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int *indices, const T_SCORE *scores, const T_BBOX *bboxData, T_BBOX *nmsedDets, int *nmsedLabels, int *nmsedIndex, bool clipBoxes) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; if (index == -1) { nmsedLabels[i] = -1; if (nmsedIndex != nullptr) { nmsedIndex[i] = -1; } if (rotated) { nmsedDets[i * 6] = 0; nmsedDets[i * 6 + 1] = 0; nmsedDets[i * 6 + 2] = 0; nmsedDets[i * 6 + 3] = 0; nmsedDets[i * 6 + 4] = 0; nmsedDets[i * 6 + 5] = 0; } else { nmsedDets[i * 5] = 0; nmsedDets[i * 5 + 1] = 0; nmsedDets[i * 5 + 2] = 0; nmsedDets[i * 5 + 3] = 0; nmsedDets[i * 5 + 4] = 0; } } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); nmsedLabels[i] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label if (rotated) { const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 5; if (nmsedIndex != nullptr) { nmsedIndex[i] = bboxId / 5; } // clipped bbox xmin nmsedDets[i * 6] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; // clipped bbox ymin nmsedDets[i * 6 + 1] = clipBoxes ? max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 1]; // clipped bbox xmax nmsedDets[i * 6 + 2] = clipBoxes ? max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 2]; // clipped bbox ymax nmsedDets[i * 6 + 3] = clipBoxes ? max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 3]; // clipped bbox angle nmsedDets[i * 6 + 4] = clipBoxes ? max(min(bboxData[bboxId + 4], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 4]; nmsedDets[i * 6 + 5] = score; } else { const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; if (nmsedIndex != nullptr) { nmsedIndex[i] = bboxId / 4; } // clipped bbox xmin nmsedDets[i * 5] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; // clipped bbox ymin nmsedDets[i * 5 + 1] = clipBoxes ? max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 1]; // clipped bbox xmax nmsedDets[i * 5 + 2] = clipBoxes ? max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 2]; // clipped bbox ymax nmsedDets[i * 5 + 3] = clipBoxes ? 
max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 3]; nmsedDets[i * 5 + 4] = score; } } } } template <typename T_BBOX, typename T_SCORE, bool rotated> pluginStatus_t gatherNMSOutputs_gpu(hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void *indices, const void *scores, const void *bboxData, void *nmsedDets, void *nmsedLabels, void *nmsedIndex, bool clipBoxes) { const int BS = 32; const int GS = 32; hipLaunchKernelGGL(( gatherNMSOutputs_kernel<T_BBOX, T_SCORE, rotated, BS>), dim3(GS), dim3(BS), 0, stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int *)indices, (T_SCORE *)scores, (T_BBOX *)bboxData, (T_BBOX *)nmsedDets, (int *)nmsedLabels, (int *)nmsedIndex, clipBoxes); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherNMSOutputs LAUNCH CONFIG {{{ typedef pluginStatus_t (*nmsOutFunc)(hipStream_t, const bool, const int, const int, const int, const int, const int, const void *, const void *, const void *, void *, void *, void *, bool); struct nmsOutLaunchConfig { DataType t_bbox; DataType t_score; bool rotated; nmsOutFunc function; nmsOutLaunchConfig(DataType t_bbox, DataType t_score, bool rotated) : t_bbox(t_bbox), t_score(t_score), rotated(rotated) {} nmsOutLaunchConfig(DataType t_bbox, DataType t_score, bool rotated, nmsOutFunc function) : t_bbox(t_bbox), t_score(t_score), rotated(rotated), function(function) {} bool operator==(const nmsOutLaunchConfig &other) { return t_bbox == other.t_bbox && t_score == other.t_score && rotated == other.rotated; } }; using nvinfer1::DataType; static std::vector<nmsOutLaunchConfig> nmsOutFuncVec; bool nmsOutputInit() { nmsOutFuncVec.push_back(nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, false, gatherNMSOutputs_gpu<float, float, false>)); nmsOutFuncVec.push_back(nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, true, gatherNMSOutputs_gpu<float, float, true>)); return true; } static bool initialized = nmsOutputInit(); pluginStatus_t gatherNMSOutputs(hipStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType DT_SCORE, const void *indices, const void *scores, const void *bboxData, void *nmsedDets, void *nmsedLabels, void *nmsedIndex, bool clipBoxes, bool rotated) { nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE, rotated); for (unsigned i = 0; i < nmsOutFuncVec.size(); ++i) { if (lc == nmsOutFuncVec[i]) { DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i); return nmsOutFuncVec[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, nmsedDets, nmsedLabels, nmsedIndex, clipBoxes); } } return STATUS_BAD_PARAM; }
8b27c40a2c84ada47463538b1679eee11322e460.cu
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // modify from // https://github.com/NVIDIA/TensorRT/tree/master/plugin/batchedNMSPlugin #include <vector> #include "nms/kernel.h" #include "trt_plugin_helper.hpp" template <typename T_BBOX, typename T_SCORE, bool rotated, unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void gatherNMSOutputs_kernel(const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const int *indices, const T_SCORE *scores, const T_BBOX *bboxData, T_BBOX *nmsedDets, int *nmsedLabels, int *nmsedIndex, bool clipBoxes) { if (keepTopK > topK) return; for (int i = blockIdx.x * nthds_per_cta + threadIdx.x; i < numImages * keepTopK; i += gridDim.x * nthds_per_cta) { const int imgId = i / keepTopK; const int detId = i % keepTopK; const int offset = imgId * numClasses * topK; const int index = indices[offset + detId]; const T_SCORE score = scores[offset + detId]; if (index == -1) { nmsedLabels[i] = -1; if (nmsedIndex != nullptr) { nmsedIndex[i] = -1; } if (rotated) { nmsedDets[i * 6] = 0; nmsedDets[i * 6 + 1] = 0; nmsedDets[i * 6 + 2] = 0; nmsedDets[i * 6 + 3] = 0; nmsedDets[i * 6 + 4] = 0; nmsedDets[i * 6 + 5] = 0; } else { nmsedDets[i * 5] = 0; nmsedDets[i * 5 + 1] = 0; nmsedDets[i * 5 + 2] = 0; nmsedDets[i * 5 + 3] = 0; nmsedDets[i * 5 + 4] = 0; } } else { const int bboxOffset = imgId * (shareLocation ? numPredsPerClass : (numClasses * numPredsPerClass)); nmsedLabels[i] = (index % (numClasses * numPredsPerClass)) / numPredsPerClass; // label if (rotated) { const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 5; if (nmsedIndex != nullptr) { nmsedIndex[i] = bboxId / 5; } // clipped bbox xmin nmsedDets[i * 6] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; // clipped bbox ymin nmsedDets[i * 6 + 1] = clipBoxes ? max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 1]; // clipped bbox xmax nmsedDets[i * 6 + 2] = clipBoxes ? max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 2]; // clipped bbox ymax nmsedDets[i * 6 + 3] = clipBoxes ? max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 3]; // clipped bbox angle nmsedDets[i * 6 + 4] = clipBoxes ? max(min(bboxData[bboxId + 4], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 4]; nmsedDets[i * 6 + 5] = score; } else { const int bboxId = ((shareLocation ? (index % numPredsPerClass) : index % (numClasses * numPredsPerClass)) + bboxOffset) * 4; if (nmsedIndex != nullptr) { nmsedIndex[i] = bboxId / 4; } // clipped bbox xmin nmsedDets[i * 5] = clipBoxes ? max(min(bboxData[bboxId], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId]; // clipped bbox ymin nmsedDets[i * 5 + 1] = clipBoxes ? max(min(bboxData[bboxId + 1], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 1]; // clipped bbox xmax nmsedDets[i * 5 + 2] = clipBoxes ? max(min(bboxData[bboxId + 2], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 2]; // clipped bbox ymax nmsedDets[i * 5 + 3] = clipBoxes ? 
max(min(bboxData[bboxId + 3], T_BBOX(1.)), T_BBOX(0.)) : bboxData[bboxId + 3]; nmsedDets[i * 5 + 4] = score; } } } } template <typename T_BBOX, typename T_SCORE, bool rotated> pluginStatus_t gatherNMSOutputs_gpu(cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const void *indices, const void *scores, const void *bboxData, void *nmsedDets, void *nmsedLabels, void *nmsedIndex, bool clipBoxes) { const int BS = 32; const int GS = 32; gatherNMSOutputs_kernel<T_BBOX, T_SCORE, rotated, BS><<<GS, BS, 0, stream>>>( shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, (int *)indices, (T_SCORE *)scores, (T_BBOX *)bboxData, (T_BBOX *)nmsedDets, (int *)nmsedLabels, (int *)nmsedIndex, clipBoxes); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } // gatherNMSOutputs LAUNCH CONFIG {{{ typedef pluginStatus_t (*nmsOutFunc)(cudaStream_t, const bool, const int, const int, const int, const int, const int, const void *, const void *, const void *, void *, void *, void *, bool); struct nmsOutLaunchConfig { DataType t_bbox; DataType t_score; bool rotated; nmsOutFunc function; nmsOutLaunchConfig(DataType t_bbox, DataType t_score, bool rotated) : t_bbox(t_bbox), t_score(t_score), rotated(rotated) {} nmsOutLaunchConfig(DataType t_bbox, DataType t_score, bool rotated, nmsOutFunc function) : t_bbox(t_bbox), t_score(t_score), rotated(rotated), function(function) {} bool operator==(const nmsOutLaunchConfig &other) { return t_bbox == other.t_bbox && t_score == other.t_score && rotated == other.rotated; } }; using nvinfer1::DataType; static std::vector<nmsOutLaunchConfig> nmsOutFuncVec; bool nmsOutputInit() { nmsOutFuncVec.push_back(nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, false, gatherNMSOutputs_gpu<float, float, false>)); nmsOutFuncVec.push_back(nmsOutLaunchConfig(DataType::kFLOAT, DataType::kFLOAT, true, gatherNMSOutputs_gpu<float, float, true>)); return true; } static bool initialized = nmsOutputInit(); pluginStatus_t gatherNMSOutputs(cudaStream_t stream, const bool shareLocation, const int numImages, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const DataType DT_BBOX, const DataType DT_SCORE, const void *indices, const void *scores, const void *bboxData, void *nmsedDets, void *nmsedLabels, void *nmsedIndex, bool clipBoxes, bool rotated) { nmsOutLaunchConfig lc = nmsOutLaunchConfig(DT_BBOX, DT_SCORE, rotated); for (unsigned i = 0; i < nmsOutFuncVec.size(); ++i) { if (lc == nmsOutFuncVec[i]) { DEBUG_PRINTF("gatherNMSOutputs kernel %d\n", i); return nmsOutFuncVec[i].function(stream, shareLocation, numImages, numPredsPerClass, numClasses, topK, keepTopK, indices, scores, bboxData, nmsedDets, nmsedLabels, nmsedIndex, clipBoxes); } } return STATUS_BAD_PARAM; }
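The nmsOutLaunchConfig vector acts as a small run-time dispatch table: each (bbox type, score type, rotated) combination registers its templated launcher once, and gatherNMSOutputs scans the table for a match, falling through to STATUS_BAD_PARAM otherwise. Stripped of the TensorRT types, the pattern is roughly the sketch below; LaunchCfg, DT and dispatch are illustrative names, not the plugin's API.

#include <functional>
#include <vector>

enum class DT { kFLOAT, kHALF };

struct LaunchCfg {
    DT dtype; bool rotated;
    std::function<void()> launch;          // would wrap a gatherNMSOutputs_gpu<...> instantiation
};

static std::vector<LaunchCfg> registry = {
    {DT::kFLOAT, false, [] { /* gatherNMSOutputs_gpu<float, float, false>(...) */ }},
    {DT::kFLOAT, true,  [] { /* gatherNMSOutputs_gpu<float, float, true>(...)  */ }},
};

bool dispatch(DT dtype, bool rotated) {
    for (const auto &cfg : registry)
        if (cfg.dtype == dtype && cfg.rotated == rotated) { cfg.launch(); return true; }
    return false;                          // mirrors the STATUS_BAD_PARAM fall-through
}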
d2c36a17f9c3db3421e299d73c25355c0579fa2a.hip
// !!! This is a file automatically generated by hipify!!! #include "TaskExecutor.hpp" #include "kernels/ActivationKernel.cuh" #include "kernels/AdamKernel.cuh" #include "kernels/SoftmaxKernel.cuh" #include "kernels/BackwardDeltaKernel.cuh" #include "kernels/GradientIncrementKernel.cuh" #include "kernels/MatrixFillKernel.cuh" #include "kernels/MatrixScaleKernel.cuh" #include "kernels/TransposeKernel.cuh" #include "kernels/WeightedIncrementKernel.cuh" #include "kernels/ErrorMeasureKernel.cuh" #include "Util.cuh" #include <hip/hip_runtime.h> using namespace rnn; using namespace rnn::cuda; struct TaskExecutor::TaskExecutorImpl { hipStream_t stream; TaskExecutorImpl() { hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); } ~TaskExecutorImpl() { hipStreamDestroy(stream); } void Execute(const Task &t) { hipError_t err; switch(t.type) { case TaskType::LAYER_ACTIVATION: if (t.data.layerActivationData.activation == LayerActivation::SOFTMAX) { SoftmaxKernel::Apply(t.data.layerActivationData.layer, stream); } else { ActivationKernel::Apply( t.data.layerActivationData.layer, t.data.layerActivationData.activation, stream); } return; case TaskType::ERROR_MEASURE: ErrorMeasureKernel::Apply(t.data.errorMeasureData.networkOutput, t.data.errorMeasureData.targetOutput, t.data.errorMeasureData.deltaMask, t.data.errorMeasureData.outputLayer, stream); return; case TaskType::PROPAGATE_DELTA: BackwardDeltaKernel::Apply(t.data.propagateDeltaData.nextDelta, t.data.propagateDeltaData.transposedWeights, t.data.propagateDeltaData.connection, t.data.propagateDeltaData.outDelta, stream); return; case TaskType::GRADIENT_INCREMENT: GradientIncrementKernel::Apply(t.data.gradientIncrementData.layerDeltas, t.data.gradientIncrementData.connection, t.data.gradientIncrementData.outGradient, stream); return; case TaskType::FILL_MATRIX: MatrixFillKernel::Apply(t.data.fillMatrixData.target, t.data.fillMatrixData.value, stream); return; case TaskType::SCALE_MATRIX: MatrixScaleKernel::Apply(t.data.scaleMatrixData.target, t.data.scaleMatrixData.scale, stream); return; case TaskType::TRANSPOSE_MATRIX: TransposeKernel::Apply(t.data.transposeMatrixData.src, t.data.transposeMatrixData.dst, stream); return; case TaskType::FORWARD_INCREMENT: WeightedIncrementKernel::Apply(t.data.forwardIncrementData.layerWeights, t.data.forwardIncrementData.input, t.data.forwardIncrementData.output, stream); return; case TaskType::ADAM_UPDATE: AdamKernel::UpdateMomentumAndRMS( t.data.adamUpdateData.gradient, t.data.adamUpdateData.momentum, t.data.adamUpdateData.rms, t.data.adamUpdateData.beta1, t.data.adamUpdateData.beta2, stream); return; case TaskType::ADAM_INCREMENT: AdamKernel::UpdateWeightsWithAdam( t.data.adamIncrementData.weights, t.data.adamIncrementData.momentum, t.data.adamIncrementData.rms, t.data.adamIncrementData.beta1, t.data.adamIncrementData.beta2, t.data.adamIncrementData.lr, t.data.adamIncrementData.epsilon, stream); return; case TaskType::COPY_MATRIX_D2H: err = hipMemcpy2DAsync( t.data.copyMatrixD2HData.dst.data, t.data.copyMatrixD2HData.dst.cols * sizeof(float), t.data.copyMatrixD2HData.src.data, t.data.copyMatrixD2HData.src.pitch, t.data.copyMatrixD2HData.src.cols * sizeof(float), t.data.copyMatrixD2HData.src.rows, hipMemcpyDeviceToHost, stream); CheckError(err); return; case TaskType::COPY_MATRIX_H2D: err = hipMemcpy2DAsync( t.data.copyMatrixH2DData.dst.data, t.data.copyMatrixH2DData.dst.pitch, t.data.copyMatrixH2DData.src.data, t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.cols * sizeof(float), 
t.data.copyMatrixH2DData.src.rows, hipMemcpyHostToDevice, stream); CheckError(err); return; case TaskType::COPY_MATRIX_D2D: err = hipMemcpy2DAsync( t.data.copyMatrixD2DData.dst.data, t.data.copyMatrixD2DData.dst.pitch, t.data.copyMatrixD2DData.src.data, t.data.copyMatrixD2DData.src.pitch, t.data.copyMatrixD2DData.src.cols * sizeof(float), t.data.copyMatrixD2DData.src.rows, hipMemcpyDeviceToDevice, stream); CheckError(err); return; default: assert(false); } } }; TaskExecutor::TaskExecutor() : impl(new TaskExecutorImpl()) {} TaskExecutor::~TaskExecutor() = default; void TaskExecutor::Execute(const Task &task) { impl->Execute(task); }
d2c36a17f9c3db3421e299d73c25355c0579fa2a.cu
#include "TaskExecutor.hpp" #include "kernels/ActivationKernel.cuh" #include "kernels/AdamKernel.cuh" #include "kernels/SoftmaxKernel.cuh" #include "kernels/BackwardDeltaKernel.cuh" #include "kernels/GradientIncrementKernel.cuh" #include "kernels/MatrixFillKernel.cuh" #include "kernels/MatrixScaleKernel.cuh" #include "kernels/TransposeKernel.cuh" #include "kernels/WeightedIncrementKernel.cuh" #include "kernels/ErrorMeasureKernel.cuh" #include "Util.cuh" #include <cuda_runtime.h> using namespace rnn; using namespace rnn::cuda; struct TaskExecutor::TaskExecutorImpl { cudaStream_t stream; TaskExecutorImpl() { cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); } ~TaskExecutorImpl() { cudaStreamDestroy(stream); } void Execute(const Task &t) { cudaError_t err; switch(t.type) { case TaskType::LAYER_ACTIVATION: if (t.data.layerActivationData.activation == LayerActivation::SOFTMAX) { SoftmaxKernel::Apply(t.data.layerActivationData.layer, stream); } else { ActivationKernel::Apply( t.data.layerActivationData.layer, t.data.layerActivationData.activation, stream); } return; case TaskType::ERROR_MEASURE: ErrorMeasureKernel::Apply(t.data.errorMeasureData.networkOutput, t.data.errorMeasureData.targetOutput, t.data.errorMeasureData.deltaMask, t.data.errorMeasureData.outputLayer, stream); return; case TaskType::PROPAGATE_DELTA: BackwardDeltaKernel::Apply(t.data.propagateDeltaData.nextDelta, t.data.propagateDeltaData.transposedWeights, t.data.propagateDeltaData.connection, t.data.propagateDeltaData.outDelta, stream); return; case TaskType::GRADIENT_INCREMENT: GradientIncrementKernel::Apply(t.data.gradientIncrementData.layerDeltas, t.data.gradientIncrementData.connection, t.data.gradientIncrementData.outGradient, stream); return; case TaskType::FILL_MATRIX: MatrixFillKernel::Apply(t.data.fillMatrixData.target, t.data.fillMatrixData.value, stream); return; case TaskType::SCALE_MATRIX: MatrixScaleKernel::Apply(t.data.scaleMatrixData.target, t.data.scaleMatrixData.scale, stream); return; case TaskType::TRANSPOSE_MATRIX: TransposeKernel::Apply(t.data.transposeMatrixData.src, t.data.transposeMatrixData.dst, stream); return; case TaskType::FORWARD_INCREMENT: WeightedIncrementKernel::Apply(t.data.forwardIncrementData.layerWeights, t.data.forwardIncrementData.input, t.data.forwardIncrementData.output, stream); return; case TaskType::ADAM_UPDATE: AdamKernel::UpdateMomentumAndRMS( t.data.adamUpdateData.gradient, t.data.adamUpdateData.momentum, t.data.adamUpdateData.rms, t.data.adamUpdateData.beta1, t.data.adamUpdateData.beta2, stream); return; case TaskType::ADAM_INCREMENT: AdamKernel::UpdateWeightsWithAdam( t.data.adamIncrementData.weights, t.data.adamIncrementData.momentum, t.data.adamIncrementData.rms, t.data.adamIncrementData.beta1, t.data.adamIncrementData.beta2, t.data.adamIncrementData.lr, t.data.adamIncrementData.epsilon, stream); return; case TaskType::COPY_MATRIX_D2H: err = cudaMemcpy2DAsync( t.data.copyMatrixD2HData.dst.data, t.data.copyMatrixD2HData.dst.cols * sizeof(float), t.data.copyMatrixD2HData.src.data, t.data.copyMatrixD2HData.src.pitch, t.data.copyMatrixD2HData.src.cols * sizeof(float), t.data.copyMatrixD2HData.src.rows, cudaMemcpyDeviceToHost, stream); CheckError(err); return; case TaskType::COPY_MATRIX_H2D: err = cudaMemcpy2DAsync( t.data.copyMatrixH2DData.dst.data, t.data.copyMatrixH2DData.dst.pitch, t.data.copyMatrixH2DData.src.data, t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.cols * sizeof(float), t.data.copyMatrixH2DData.src.rows, 
cudaMemcpyHostToDevice, stream); CheckError(err); return; case TaskType::COPY_MATRIX_D2D: err = cudaMemcpy2DAsync( t.data.copyMatrixD2DData.dst.data, t.data.copyMatrixD2DData.dst.pitch, t.data.copyMatrixD2DData.src.data, t.data.copyMatrixD2DData.src.pitch, t.data.copyMatrixD2DData.src.cols * sizeof(float), t.data.copyMatrixD2DData.src.rows, cudaMemcpyDeviceToDevice, stream); CheckError(err); return; default: assert(false); } } }; TaskExecutor::TaskExecutor() : impl(new TaskExecutorImpl()) {} TaskExecutor::~TaskExecutor() = default; void TaskExecutor::Execute(const Task &task) { impl->Execute(task); }
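Most branches above forward to dedicated kernels; the COPY_MATRIX_* branches are plain cudaMemcpy2DAsync calls whose argument order (destination pitch, source pitch, width in bytes, height in rows) is easy to get wrong. A self-contained sketch of the D2H case, under the assumption that the device matrix was allocated with cudaMallocPitch, might look like this:

#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int rows = 4, cols = 5;
    float h_dst[rows * cols];                                  // tightly packed host matrix
    float *d_src; size_t d_pitch;
    cudaMallocPitch((void **)&d_src, &d_pitch, cols * sizeof(float), rows);
    cudaMemset2D(d_src, d_pitch, 0, cols * sizeof(float), rows);

    cudaStream_t s;
    cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
    // dst pitch = packed row size; src pitch = whatever cudaMallocPitch chose
    cudaMemcpy2DAsync(h_dst, cols * sizeof(float),
                      d_src, d_pitch,
                      cols * sizeof(float), rows,
                      cudaMemcpyDeviceToHost, s);
    cudaStreamSynchronize(s);              // async copy into pageable memory still needs a sync point

    printf("h_dst[0] = %f\n", h_dst[0]);
    cudaStreamDestroy(s);
    cudaFree(d_src);
    return 0;
}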
adac7b8f6cac0fc0a887f09313efb0228548cb15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * makeProjection() * Forms E and E^T matrices from eigenvectors * float* eT: E^T (M x N), populated by function * float* e: E (N x M), populated by function * float* eigenvec: matrix of eigenvectors, unsorted * int* indices: indices to accept from eigenvectors * int M: number of eigenvectors kept (columns of E, rows of E^T) * int N: degrees of freedom */ __global__ void makeProjection( float *eT, float *e, float *eigenvec, int *indices, int M, int N ) { int elementNum = blockIdx.x * blockDim.x + threadIdx.x; if( elementNum >= M * N ) { return; } int m = elementNum / N; int n = elementNum % N; e[n * M + m] = eigenvec[n * M + indices[m]]; eT[m * N + n] = e[n * M + m]; }
adac7b8f6cac0fc0a887f09313efb0228548cb15.cu
/* * makeProjection() * Forms E and E^T matrices from eigenvectors * float* eT: E^T (M x N), populated by function * float* e: E (N x M), populated by function * float* eigenvec: matrix of eigenvectors, unsorted * int* indices: indices to accept from eigenvectors * int M: number of eigenvectors kept (columns of E, rows of E^T) * int N: degrees of freedom */ __global__ void makeProjection( float *eT, float *e, float *eigenvec, int *indices, int M, int N ) { int elementNum = blockIdx.x * blockDim.x + threadIdx.x; if( elementNum >= M * N ) { return; } int m = elementNum / N; int n = elementNum % N; e[n * M + m] = eigenvec[n * M + indices[m]]; eT[m * N + n] = e[n * M + m]; }
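A host-side launch is not included in this file; with one thread per element of the M x N output, the grid size is the usual ceiling division. The helper below is an illustrative sketch only (launchMakeProjection is not part of the original source, and it assumes the kernel above is visible and all pointers are device allocations).

#include <cuda_runtime.h>

void launchMakeProjection(float *d_eT, float *d_e, float *d_eigenvec,
                          int *d_indices, int M, int N) {
    const int block = 256;
    const int grid  = (M * N + block - 1) / block;   // ceil(M*N / block) blocks
    makeProjection<<<grid, block>>>(d_eT, d_e, d_eigenvec, d_indices, M, N);
    cudaDeviceSynchronize();                         // or check cudaGetLastError() here
}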
cde3d4e79ff487837366ae7e5c6f261603da1235.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include "../shader.cu" #define EPS 0.001 __device__ float map(vec3 p){ vec2 pxz = vec2(p.x,p.z); float plane = p.y+0.15*sin(frame/15 + length(pxz)*5)*1/p.x*1/p.z+0.5; //float cube = length(max(abs(vec3(p.x,p.y+ sinfr,p.z))-vec3(0.5),0.0))-0.3; float ball = dist(p,vec3(0)+vec3(0,1+sinfr*1.5 ,0))-1; return smin(ball,plane,0.9 ); } __device__ vec3 normal(vec3 p){ vec3 q = vec3(map(vec3(p.x + EPS, p.y, p.z)) - map(vec3(p.x - EPS, p.y, p.z)), map(vec3(p.x, p.y + EPS, p.z )) - map(vec3(p.x, p.y - EPS, p.z)), map(vec3(p.x, p.y, p.z + EPS)) - map(vec3(p.x, p.y, p.z - EPS))); return normalize(q); } __device__ float trace(vec3 org, vec3 dir){ float dist = 0.0; float d; for(int i = 0; i < 240; i++) { vec3 p = org+dir*dist; d = map(p); if( d <= 0.01){ break; } dist += d; } return dist; } __global__ void Mandel_calc(unsigned char* image_buffer){ unsigned short int row = (blockIdx.y * blockDim.y + threadIdx.y); // WIDTH unsigned short int col = (blockIdx.x * blockDim.x + threadIdx.x); // HEIGHT unsigned int idx = 3*(row * window.x + col); float y0 = - (float) (row -window.x/2)/(window.x/2); float x0 = (float) (col -window.y/2)/(window.y/2); float r,g,b; vec3 direction = normalize(vec3(x0,y0, 1)); vec3 light = vec3(sinfr*2,5.0+3*sinfr,-2.0); vec3 origin = vec3(1,1,-3); float dist = trace(origin,direction); vec3 p = origin + direction*dist; vec3 norm = normal(p); //double f = dot(direction, norm); vec3 reflection = direction - norm* 2 * dot(direction, norm); vec3 c3po = vec3(0.8,1.0,0.8); c3po = c3po * dot(norm, normalize(light-p)); float spec = pow(max(0.0,dot(reflection,normalize(light-p))),10); vec3 ambient = vec3(0.3,0.4,0.75); c3po = c3po + ambient + vec3(1,1,1); r = c3po.x*100; g = c3po.y*100; b = c3po.z*100; color(r,g,b,&image_buffer[idx]); } #include "../main.cu"
cde3d4e79ff487837366ae7e5c6f261603da1235.cu
#include <math.h> #include <stdio.h> #include "../shader.cu" #define EPS 0.001 __device__ float map(vec3 p){ vec2 pxz = vec2(p.x,p.z); float plane = p.y+0.15*sin(frame/15 + length(pxz)*5)*1/p.x*1/p.z+0.5; //float cube = length(max(abs(vec3(p.x,p.y+ sinfr,p.z))-vec3(0.5),0.0))-0.3; float ball = dist(p,vec3(0)+vec3(0,1+sinfr*1.5 ,0))-1; return smin(ball,plane,0.9 ); } __device__ vec3 normal(vec3 p){ vec3 q = vec3(map(vec3(p.x + EPS, p.y, p.z)) - map(vec3(p.x - EPS, p.y, p.z)), map(vec3(p.x, p.y + EPS, p.z )) - map(vec3(p.x, p.y - EPS, p.z)), map(vec3(p.x, p.y, p.z + EPS)) - map(vec3(p.x, p.y, p.z - EPS))); return normalize(q); } __device__ float trace(vec3 org, vec3 dir){ float dist = 0.0; float d; for(int i = 0; i < 240; i++) { vec3 p = org+dir*dist; d = map(p); if( d <= 0.01){ break; } dist += d; } return dist; } __global__ void Mandel_calc(unsigned char* image_buffer){ unsigned short int row = (blockIdx.y * blockDim.y + threadIdx.y); // WIDTH unsigned short int col = (blockIdx.x * blockDim.x + threadIdx.x); // HEIGHT unsigned int idx = 3*(row * window.x + col); float y0 = - (float) (row -window.x/2)/(window.x/2); float x0 = (float) (col -window.y/2)/(window.y/2); float r,g,b; vec3 direction = normalize(vec3(x0,y0, 1)); vec3 light = vec3(sinfr*2,5.0+3*sinfr,-2.0); vec3 origin = vec3(1,1,-3); float dist = trace(origin,direction); vec3 p = origin + direction*dist; vec3 norm = normal(p); //double f = dot(direction, norm); vec3 reflection = direction - norm* 2 * dot(direction, norm); vec3 c3po = vec3(0.8,1.0,0.8); c3po = c3po * dot(norm, normalize(light-p)); float spec = pow(max(0.0,dot(reflection,normalize(light-p))),10); vec3 ambient = vec3(0.3,0.4,0.75); c3po = c3po + ambient + vec3(1,1,1); r = c3po.x*100; g = c3po.y*100; b = c3po.z*100; color(r,g,b,&image_buffer[idx]); } #include "../main.cu"
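dist(), smin() and the frame/sinfr globals come from ../shader.cu, which is not shown here; smin is presumably the usual polynomial smooth minimum used to blend the ball and the plane signed-distance fields. As a hedged sketch of what such a helper typically looks like (smin_sketch is not the project's function):

__device__ float smin_sketch(float a, float b, float k) {
    // polynomial smooth minimum: blends two distance fields over a band of width k
    float h = fmaxf(k - fabsf(a - b), 0.0f) / k;
    return fminf(a, b) - h * h * k * 0.25f;
}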
d0889a8848cb52c4d4ce257d9552dd49da940963.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "gpu_routines.h" #include <hip/hip_runtime.h> #define BLOCK 32 #define N_COL 30 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){ if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void positionalCorrection_Kernel(Circle * circles, int n, Collision * colls, unsigned int * n_cols) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < (*n_cols)){ Collision c = colls[idx]; Circle A = circles[c.A]; Circle B = circles[c.B]; float k_slop = 0.05f; // Penetration allowance //float percent = 0.4f; // Penetration percentage to correct float corr_aux = 0.4f * (max(c.penetration - k_slop, 0.0f) / (A.inv_mass + B.inv_mass)); float corr_x = corr_aux * c.normal_x; float corr_y = corr_aux * c.normal_y; atomicAdd(&circles[c.A].px, (-1.0f) * (corr_x * A.inv_mass)); atomicAdd(&circles[c.A].py, (-1.0f) * (corr_y * A.inv_mass)); atomicAdd(&circles[c.B].px, (corr_x * B.inv_mass)); atomicAdd(&circles[c.B].py, (corr_y * B.inv_mass)); } } __global__ void solveCollisions_Kernel(Circle * circles, int n, Collision * colls, unsigned int * n_cols, int iterations, float gravity, float dt) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < (*n_cols)){ Collision c = colls[idx]; Circle A = circles[c.A]; Circle B = circles[c.B]; float Avx = A.vx, Avy = A.vy, AvA = A.angularVelocity; float Bvx = B.vx, Bvy = B.vy, BvA = B.angularVelocity; float rax = c.contact_x - A.px; float ray = c.contact_y - A.py; float rbx = c.contact_x - B.px; float rby = c.contact_y - B.py; float rvx = Bvx - (BvA * rby) - Avx + (AvA * ray); //ERROR POSIBLE float rvy = Bvy + (BvA * rbx) - Avy - (AvA * rax); float contact_vel = rvx * c.normal_x + rvy * c.normal_y; if(contact_vel > 0.0f) return; float raCrossN = (rax * c.normal_y) - (ray * c.normal_x); float rbCrossN = (rbx * c.normal_y) - (rby * c.normal_x); float invMassSum = A.inv_mass + B.inv_mass + raCrossN*raCrossN * A.inv_inertia + rbCrossN*rbCrossN * B.inv_inertia; float e = 0.2f; if((rvx * rvx + rvy * rvy) < ((dt * gravity * dt * gravity) + EPS)) e = 0.0f; float j = -(1.0f + e) * contact_vel; j /= invMassSum; float impulse_x = c.normal_x * j; float impulse_y = c.normal_y * j; Avx += A.inv_mass * (-impulse_x); Avy += A.inv_mass * (-impulse_y); AvA += A.inv_inertia * ((rax * (-impulse_y)) - (ray * (-impulse_x))); Bvx += B.inv_mass * (impulse_x); Bvy += B.inv_mass * (impulse_y); BvA += B.inv_inertia * ((rbx * (impulse_y)) - (rby * (impulse_x))); atomicAdd(&circles[c.A].vx, (Avx - A.vx)); atomicAdd(&circles[c.A].vy, (Avy - A.vy)); atomicAdd(&circles[c.A].angularVelocity, (AvA - A.angularVelocity)); atomicAdd(&circles[c.B].vx, (Bvx - B.vx)); atomicAdd(&circles[c.B].vy, (Bvy - B.vy)); atomicAdd(&circles[c.B].angularVelocity, (BvA - B.angularVelocity)); } } __global__ void calculateContacs_Kernel(Circle * circles, int n, Collision * colls, unsigned int * n_cols) { int i = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; int bloc = blockDim.x * blockIdx.x; __shared__ Circle sh_Circles[BLOCK]; //Circle ri; Collision c; float ripx, ripy, riinv_mass, riradius; float rjpx, rjpy, rjinv_mass, rjradius; if(i < n){ //ri = circles[i]; ripx = circles[i].px; ripy = circles[i].py; riinv_mass = circles[i].inv_mass; riradius = circles[i].radius; } for (unsigned int j = bloc; j < n; j += BLOCK){ if(j + tid < n) 
sh_Circles[tid] = circles[j + tid]; //__syncthreads(); for(int k = 0; k < BLOCK && j + k < n; ++k){ if(j + k <= i) continue; //Circle rj = sh_Circles[k]; rjinv_mass = sh_Circles[k].inv_mass; rjpx = sh_Circles[k].px; rjpy = sh_Circles[k].py; rjradius = sh_Circles[k].radius; if(riinv_mass == 0.0f && rjinv_mass == 0.0f) continue; c.A = i; c.B = j + k; c.normal_x = rjpx - ripx; c.normal_y = rjpy - ripy; //c.normal_x = __fsub_rd(rjpx, ripx); //c.normal_y = __fsub_rd(rjpy, ripy); float suma_radius = riradius + rjradius; //float suma_radius = __fadd_rd(riradius, rjradius); float squared_dist = c.normal_x * c.normal_x + c.normal_y * c.normal_y; //float suma_radius = ri.radius + rj.radius; if(squared_dist > suma_radius * suma_radius) continue; //Not contact float dist = sqrtf(squared_dist); float inv_dist = __frcp_rd(dist); if(dist < EPS) { c.penetration = riradius; c.normal_x = 1.0f; c.normal_y = 0.0f; c.contact_x = ripx; c.contact_y = ripy; } else{ c.penetration = suma_radius - dist; //c.penetration = __fsub_rd(suma_radius, dist); c.normal_x *= inv_dist; c.normal_y *= inv_dist; //c.normal_x = __fmul_rd(c.normal_x, inv_dist); //c.normal_y = __fmul_rd(c.normal_y, inv_dist); c.contact_x = c.normal_x * riradius + ripx; c.contact_y = c.normal_y * riradius + ripy; //c.contact_x = __fmaf_rd(c.normal_x, riradius, ripx); //c.contact_y = __fmaf_rd(c.normal_y, riradius, ripy); } //int idx = *n_cols; //(*n_cols)++; int idx = atomicInc(n_cols, 1e9); //Faster than atomicAdd(n_cols, 1)?? colls[idx] = c; } //__syncthreads(); } } __global__ void integrateVelocities_Kernel(Circle * circles, int n, float gravity, float dt) { int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < n){ //Circle c = circles[i]; its faster with more global reads?? if(circles[i].inv_mass > 0.0f){ circles[i].px += circles[i].vx * dt; circles[i].py += circles[i].vy * dt; circles[i].vy += gravity * (dt / 2.0f); } //circles[i] = c; } } __global__ void integrateForces_Kernel(Circle * circles, int n, float gravity, float dt) { int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < n){ //Circle c = circles[i]; if(circles[i].inv_mass > 0.0f) circles[i].vy += gravity * (dt / 2.0f); //circles[i] = c; } } __global__ void initialize_Kernel() { printf("GPU initialized\n"); } void GPU::positionalCorrection_GPU(){ dim3 dimGrid(ceil((float)this->n_cols / BLOCK)); dim3 dimBlock(BLOCK); hipLaunchKernelGGL(( positionalCorrection_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, circles_GPU, this->lro->size(), colls_GPU, this->n_cols_GPU); hipDeviceSynchronize(); //hipFree(colls_GPU); //hipFree(n_cols_GPU); } void GPU::solveCollisions_GPU(vector<Collision> &contacts){ dim3 dimGrid(ceil((float)this->n_cols / BLOCK)); dim3 dimBlock(BLOCK); for(int i = 0; i < iterations; ++i){ hipLaunchKernelGGL(( solveCollisions_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, circles_GPU, this->lro->size(), colls_GPU, this->n_cols_GPU, iterations, gravity, dt); hipDeviceSynchronize(); } /* contacts.resize(n_cols); hipMemcpy(&(contacts[0]), &colls_GPU[0], sizeof(Collision) * this->n_cols, hipMemcpyDeviceToHost); hipFree(colls_GPU); hipFree(n_cols_GPU); */ } void GPU::calculateContact_GPU(vector<Collision> &contacts){ this->n_cols = 0; hipMemset(this->n_cols_GPU, 0, sizeof(unsigned int)); if(this->lro->size() >= this->MAX_cols_GPU){ hipFree(colls_GPU); this->MAX_cols_GPU = this->lro->size() * 2; hipMalloc((void **) &colls_GPU, sizeof(Collision) * this->MAX_cols_GPU * N_COL); } //hipMalloc((void **) &colls_GPU, sizeof(Collision) * this->lro->size() * N_COL); dim3 
dimGrid(ceil((float)this->lro->size() / BLOCK)); dim3 dimBlock(BLOCK); hipLaunchKernelGGL(( calculateContacs_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, circles_GPU, this->lro->size(), colls_GPU, this->n_cols_GPU); hipDeviceSynchronize(); hipMemcpy(&this->n_cols, this->n_cols_GPU, sizeof(unsigned int), hipMemcpyDeviceToHost); contacts.resize(n_cols); hipMemcpy(&(contacts[0]), &colls_GPU[0], sizeof(Collision) * this->n_cols, hipMemcpyDeviceToHost); } void GPU::integrateVelocities_GPU(){ dim3 dimGrid(ceil((float)this->lro->size() / BLOCK)); dim3 dimBlock(BLOCK); hipLaunchKernelGGL(( integrateVelocities_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, circles_GPU, this->lro->size(), gravity, dt); hipDeviceSynchronize(); } void GPU::integrateForces_GPU(){ dim3 dimGrid(ceil((float)this->lro->size() / BLOCK)); dim3 dimBlock(BLOCK); hipLaunchKernelGGL(( integrateForces_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, circles_GPU, this->lro->size(), gravity, dt); hipDeviceSynchronize(); } void GPU::initializeContext(){ //hipFree(0); hipLaunchKernelGGL(( initialize_Kernel), dim3(1),dim3(1), 0, 0, ); //Create in GPU to save hipMalloc's hipMalloc((void **) &this->n_cols_GPU, sizeof(int)); //hello<<<1,1>>>(); //hipDeviceSynchronize(); int device; hipGetDevice(&device); hipDeviceProp_t props; hipGetDeviceProperties(&props, device); printf("Cuda context initialized in device: \"%s\"\n", props.name); hipDeviceSynchronize(); return; } void GPU::copy_HostToDevice(){ if(this->lro->size() >= this->MAX_GPU_obj){ hipFree(circles_GPU); this->MAX_GPU_obj = 2 * this->lro->size(); hipMalloc((void **) &circles_GPU, sizeof(Circle) * this->MAX_GPU_obj); } hipMemcpyAsync(&circles_GPU[0], &(this->lro->vro[0]), sizeof(Circle) * this->lro->size(), hipMemcpyHostToDevice); this->N_GPU_obj = this->lro->size(); } void GPU::copy_DeviceToHost(){ hipMemcpyAsync(&(this->lro->vro[0]), &circles_GPU[0], sizeof(Circle) * this->lro->size(), hipMemcpyDeviceToHost); } int GPU::circlesInGPU(){ return this->N_GPU_obj; } GPU::GPU(){ this->N_GPU_obj = -1; this->MAX_GPU_obj = 0; this->MAX_cols_GPU = 0; } GPU::GPU(ListCircles * list){ this->lro = list; this->N_GPU_obj = 0; this->MAX_GPU_obj = 0; this->MAX_cols_GPU = 0; } GPU::~GPU(){ if(colls_GPU != 0){ hipFree(colls_GPU); hipFree(n_cols_GPU); hipFree(circles_GPU); } //printf("%d\n", n_cols_GPU); //printf("%d\n", circles_GPU); //if(n_cols_GPU != 0) // hipFree(n_cols_GPU); //if(circles_GPU != 0) // hipFree(circles_GPU); }
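The gpuErrchk macro is defined at the top of this file, but none of the runtime calls or launches in it are wrapped with it; kernel launches report configuration errors only through hipGetLastError and execution errors only at the next synchronization. A typical usage pattern would be the sketch below (checkedIntegrateForces is an illustrative free function, not part of the GPU class, and assumes it sits in the same translation unit as the kernels above).

void checkedIntegrateForces(Circle *circles_GPU, int n, float gravity, float dt) {
    dim3 dimGrid((n + BLOCK - 1) / BLOCK);
    dim3 dimBlock(BLOCK);
    hipLaunchKernelGGL((integrateForces_Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
                       circles_GPU, n, gravity, dt);
    gpuErrchk(hipGetLastError());        // catches bad launch configurations immediately
    gpuErrchk(hipDeviceSynchronize());   // surfaces errors raised while the kernel ran
}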
d0889a8848cb52c4d4ce257d9552dd49da940963.cu
#include <stdio.h> #include "gpu_routines.h" #include <cuda.h> #define BLOCK 32 #define N_COL 30 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){ if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void positionalCorrection_Kernel(Circle * circles, int n, Collision * colls, unsigned int * n_cols) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < (*n_cols)){ Collision c = colls[idx]; Circle A = circles[c.A]; Circle B = circles[c.B]; float k_slop = 0.05f; // Penetration allowance //float percent = 0.4f; // Penetration percentage to correct float corr_aux = 0.4f * (max(c.penetration - k_slop, 0.0f) / (A.inv_mass + B.inv_mass)); float corr_x = corr_aux * c.normal_x; float corr_y = corr_aux * c.normal_y; atomicAdd(&circles[c.A].px, (-1.0f) * (corr_x * A.inv_mass)); atomicAdd(&circles[c.A].py, (-1.0f) * (corr_y * A.inv_mass)); atomicAdd(&circles[c.B].px, (corr_x * B.inv_mass)); atomicAdd(&circles[c.B].py, (corr_y * B.inv_mass)); } } __global__ void solveCollisions_Kernel(Circle * circles, int n, Collision * colls, unsigned int * n_cols, int iterations, float gravity, float dt) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if(idx < (*n_cols)){ Collision c = colls[idx]; Circle A = circles[c.A]; Circle B = circles[c.B]; float Avx = A.vx, Avy = A.vy, AvA = A.angularVelocity; float Bvx = B.vx, Bvy = B.vy, BvA = B.angularVelocity; float rax = c.contact_x - A.px; float ray = c.contact_y - A.py; float rbx = c.contact_x - B.px; float rby = c.contact_y - B.py; float rvx = Bvx - (BvA * rby) - Avx + (AvA * ray); //ERROR POSIBLE float rvy = Bvy + (BvA * rbx) - Avy - (AvA * rax); float contact_vel = rvx * c.normal_x + rvy * c.normal_y; if(contact_vel > 0.0f) return; float raCrossN = (rax * c.normal_y) - (ray * c.normal_x); float rbCrossN = (rbx * c.normal_y) - (rby * c.normal_x); float invMassSum = A.inv_mass + B.inv_mass + raCrossN*raCrossN * A.inv_inertia + rbCrossN*rbCrossN * B.inv_inertia; float e = 0.2f; if((rvx * rvx + rvy * rvy) < ((dt * gravity * dt * gravity) + EPS)) e = 0.0f; float j = -(1.0f + e) * contact_vel; j /= invMassSum; float impulse_x = c.normal_x * j; float impulse_y = c.normal_y * j; Avx += A.inv_mass * (-impulse_x); Avy += A.inv_mass * (-impulse_y); AvA += A.inv_inertia * ((rax * (-impulse_y)) - (ray * (-impulse_x))); Bvx += B.inv_mass * (impulse_x); Bvy += B.inv_mass * (impulse_y); BvA += B.inv_inertia * ((rbx * (impulse_y)) - (rby * (impulse_x))); atomicAdd(&circles[c.A].vx, (Avx - A.vx)); atomicAdd(&circles[c.A].vy, (Avy - A.vy)); atomicAdd(&circles[c.A].angularVelocity, (AvA - A.angularVelocity)); atomicAdd(&circles[c.B].vx, (Bvx - B.vx)); atomicAdd(&circles[c.B].vy, (Bvy - B.vy)); atomicAdd(&circles[c.B].angularVelocity, (BvA - B.angularVelocity)); } } __global__ void calculateContacs_Kernel(Circle * circles, int n, Collision * colls, unsigned int * n_cols) { int i = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; int bloc = blockDim.x * blockIdx.x; __shared__ Circle sh_Circles[BLOCK]; //Circle ri; Collision c; float ripx, ripy, riinv_mass, riradius; float rjpx, rjpy, rjinv_mass, rjradius; if(i < n){ //ri = circles[i]; ripx = circles[i].px; ripy = circles[i].py; riinv_mass = circles[i].inv_mass; riradius = circles[i].radius; } for (unsigned int j = bloc; j < n; j += BLOCK){ if(j + tid < n) sh_Circles[tid] = circles[j + tid]; //__syncthreads(); for(int k = 0; k < 
BLOCK && j + k < n; ++k){ if(j + k <= i) continue; //Circle rj = sh_Circles[k]; rjinv_mass = sh_Circles[k].inv_mass; rjpx = sh_Circles[k].px; rjpy = sh_Circles[k].py; rjradius = sh_Circles[k].radius; if(riinv_mass == 0.0f && rjinv_mass == 0.0f) continue; c.A = i; c.B = j + k; c.normal_x = rjpx - ripx; c.normal_y = rjpy - ripy; //c.normal_x = __fsub_rd(rjpx, ripx); //c.normal_y = __fsub_rd(rjpy, ripy); float suma_radius = riradius + rjradius; //float suma_radius = __fadd_rd(riradius, rjradius); float squared_dist = c.normal_x * c.normal_x + c.normal_y * c.normal_y; //float suma_radius = ri.radius + rj.radius; if(squared_dist > suma_radius * suma_radius) continue; //Not contact float dist = sqrtf(squared_dist); float inv_dist = __frcp_rd(dist); if(dist < EPS) { c.penetration = riradius; c.normal_x = 1.0f; c.normal_y = 0.0f; c.contact_x = ripx; c.contact_y = ripy; } else{ c.penetration = suma_radius - dist; //c.penetration = __fsub_rd(suma_radius, dist); c.normal_x *= inv_dist; c.normal_y *= inv_dist; //c.normal_x = __fmul_rd(c.normal_x, inv_dist); //c.normal_y = __fmul_rd(c.normal_y, inv_dist); c.contact_x = c.normal_x * riradius + ripx; c.contact_y = c.normal_y * riradius + ripy; //c.contact_x = __fmaf_rd(c.normal_x, riradius, ripx); //c.contact_y = __fmaf_rd(c.normal_y, riradius, ripy); } //int idx = *n_cols; //(*n_cols)++; int idx = atomicInc(n_cols, 1e9); //Faster than atomicAdd(n_cols, 1)?? colls[idx] = c; } //__syncthreads(); } } __global__ void integrateVelocities_Kernel(Circle * circles, int n, float gravity, float dt) { int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < n){ //Circle c = circles[i]; its faster with more global reads?? if(circles[i].inv_mass > 0.0f){ circles[i].px += circles[i].vx * dt; circles[i].py += circles[i].vy * dt; circles[i].vy += gravity * (dt / 2.0f); } //circles[i] = c; } } __global__ void integrateForces_Kernel(Circle * circles, int n, float gravity, float dt) { int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < n){ //Circle c = circles[i]; if(circles[i].inv_mass > 0.0f) circles[i].vy += gravity * (dt / 2.0f); //circles[i] = c; } } __global__ void initialize_Kernel() { printf("GPU initialized\n"); } void GPU::positionalCorrection_GPU(){ dim3 dimGrid(ceil((float)this->n_cols / BLOCK)); dim3 dimBlock(BLOCK); positionalCorrection_Kernel<<<dimGrid,dimBlock>>>(circles_GPU, this->lro->size(), colls_GPU, this->n_cols_GPU); cudaDeviceSynchronize(); //cudaFree(colls_GPU); //cudaFree(n_cols_GPU); } void GPU::solveCollisions_GPU(vector<Collision> &contacts){ dim3 dimGrid(ceil((float)this->n_cols / BLOCK)); dim3 dimBlock(BLOCK); for(int i = 0; i < iterations; ++i){ solveCollisions_Kernel<<<dimGrid,dimBlock>>>(circles_GPU, this->lro->size(), colls_GPU, this->n_cols_GPU, iterations, gravity, dt); cudaDeviceSynchronize(); } /* contacts.resize(n_cols); cudaMemcpy(&(contacts[0]), &colls_GPU[0], sizeof(Collision) * this->n_cols, cudaMemcpyDeviceToHost); cudaFree(colls_GPU); cudaFree(n_cols_GPU); */ } void GPU::calculateContact_GPU(vector<Collision> &contacts){ this->n_cols = 0; cudaMemset(this->n_cols_GPU, 0, sizeof(unsigned int)); if(this->lro->size() >= this->MAX_cols_GPU){ cudaFree(colls_GPU); this->MAX_cols_GPU = this->lro->size() * 2; cudaMalloc((void **) &colls_GPU, sizeof(Collision) * this->MAX_cols_GPU * N_COL); } //cudaMalloc((void **) &colls_GPU, sizeof(Collision) * this->lro->size() * N_COL); dim3 dimGrid(ceil((float)this->lro->size() / BLOCK)); dim3 dimBlock(BLOCK); calculateContacs_Kernel<<<dimGrid,dimBlock>>>(circles_GPU, this->lro->size(), colls_GPU, 
this->n_cols_GPU); cudaDeviceSynchronize(); cudaMemcpy(&this->n_cols, this->n_cols_GPU, sizeof(unsigned int), cudaMemcpyDeviceToHost); contacts.resize(n_cols); cudaMemcpy(&(contacts[0]), &colls_GPU[0], sizeof(Collision) * this->n_cols, cudaMemcpyDeviceToHost); } void GPU::integrateVelocities_GPU(){ dim3 dimGrid(ceil((float)this->lro->size() / BLOCK)); dim3 dimBlock(BLOCK); integrateVelocities_Kernel<<<dimGrid,dimBlock>>>(circles_GPU, this->lro->size(), gravity, dt); cudaDeviceSynchronize(); } void GPU::integrateForces_GPU(){ dim3 dimGrid(ceil((float)this->lro->size() / BLOCK)); dim3 dimBlock(BLOCK); integrateForces_Kernel<<<dimGrid,dimBlock>>>(circles_GPU, this->lro->size(), gravity, dt); cudaDeviceSynchronize(); } void GPU::initializeContext(){ //cudaFree(0); initialize_Kernel<<<1,1>>>(); //Create in GPU to save cudaMalloc's cudaMalloc((void **) &this->n_cols_GPU, sizeof(int)); //hello<<<1,1>>>(); //cudaDeviceSynchronize(); int device; cudaGetDevice(&device); cudaDeviceProp props; cudaGetDeviceProperties(&props, device); printf("Cuda context initialized in device: \"%s\"\n", props.name); cudaDeviceSynchronize(); return; } void GPU::copy_HostToDevice(){ if(this->lro->size() >= this->MAX_GPU_obj){ cudaFree(circles_GPU); this->MAX_GPU_obj = 2 * this->lro->size(); cudaMalloc((void **) &circles_GPU, sizeof(Circle) * this->MAX_GPU_obj); } cudaMemcpyAsync(&circles_GPU[0], &(this->lro->vro[0]), sizeof(Circle) * this->lro->size(), cudaMemcpyHostToDevice); this->N_GPU_obj = this->lro->size(); } void GPU::copy_DeviceToHost(){ cudaMemcpyAsync(&(this->lro->vro[0]), &circles_GPU[0], sizeof(Circle) * this->lro->size(), cudaMemcpyDeviceToHost); } int GPU::circlesInGPU(){ return this->N_GPU_obj; } GPU::GPU(){ this->N_GPU_obj = -1; this->MAX_GPU_obj = 0; this->MAX_cols_GPU = 0; } GPU::GPU(ListCircles * list){ this->lro = list; this->N_GPU_obj = 0; this->MAX_GPU_obj = 0; this->MAX_cols_GPU = 0; } GPU::~GPU(){ if(colls_GPU != 0){ cudaFree(colls_GPU); cudaFree(n_cols_GPU); cudaFree(circles_GPU); } //printf("%d\n", n_cols_GPU); //printf("%d\n", circles_GPU); //if(n_cols_GPU != 0) // cudaFree(n_cols_GPU); //if(circles_GPU != 0) // cudaFree(circles_GPU); }
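The calculateContacs_Kernel in the pair above stages circles through shared memory but leaves both __syncthreads() calls commented out, and its comment asks whether atomicInc beats atomicAdd for the collision counter. The following is a minimal sketch of the standard shared-memory tiling pattern with the barriers in place; CircleSketch is a stand-in struct with only the fields the sketch touches, and countOverlaps is an illustrative kernel, not the project's.

#include <cstdio>

#define TILE 32

// Stand-in struct (assumption): only the fields this sketch needs.
struct CircleSketch { float px, py, radius, inv_mass; };

__global__ void countOverlaps(const CircleSketch *circles, int n, unsigned int *n_pairs)
{
    __shared__ CircleSketch tile[TILE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    for (int base = 0; base < n; base += TILE) {
        __syncthreads();                       // wait until the previous tile is no longer being read
        if (base + threadIdx.x < n)
            tile[threadIdx.x] = circles[base + threadIdx.x];
        __syncthreads();                       // wait until the whole tile is written

        if (i >= n) continue;                  // loop trip count is uniform, so the barriers stay valid
        for (int k = 0; k < TILE && base + k < n; ++k) {
            int j = base + k;
            if (j <= i) continue;              // count each unordered pair once
            float dx = tile[k].px - circles[i].px;
            float dy = tile[k].py - circles[i].py;
            float r  = tile[k].radius + circles[i].radius;
            if (dx * dx + dy * dy <= r * r)
                atomicAdd(n_pairs, 1u);        // atomicInc(n_pairs, 0xffffffffu) behaves the same for realistic counts
        }
    }
}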
205c8a6715ff23bfa14d0682178508a14dc8674d.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu< float, 1, int32_t, float, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
205c8a6715ff23bfa14d0682178508a14dc8674d.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCHW; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu< float, 1, int32_t, float, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAdd>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
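Apart from the hipify banner, the only difference between this generated wrapper and its .hip twin above is the stream type in the explicit instantiation (cudaStream_t vs. hipStream_t). Below is a small host-side sketch, not part of the original file, of producing such a stream on the CUDA side; the CHECK macro is illustrative and the wrapper call itself is elided.

#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call) do { cudaError_t e_ = (call); if (e_ != cudaSuccess) { \
    fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(e_)); return 1; } } while (0)

int main()
{
    cudaStream_t stream;                     // hipify rewrites this to hipStream_t
    CHECK(cudaStreamCreate(&stream));        // ... and this to hipStreamCreate
    // cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(..., stream) would be enqueued here
    CHECK(cudaStreamSynchronize(stream));
    CHECK(cudaStreamDestroy(stream));
    return 0;
}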
c0a4fadb36c455e6b866b2b6031ed0eec6730dd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<stdio.h> #include<iostream> //define the multithread action __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f*f; } //start main activity int main(int argc,char **argv){ //initilize array specs const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); //initalize array values float h_in[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i]=float(i); } //print array std::cout<<"Before: \n"; for(int i=0;i<ARRAY_SIZE;i++){ printf("%f", h_in[i]); printf(((i%4)!=3) ? "\t" : "\n"); } std::cout<<"\n"; //initlize an array of the same size as our input float h_out[ARRAY_SIZE]; //initalize the inputs to the multithread functiuon float * d_in; float * d_out; //allocate memory for the arrays hipMalloc((void**) &d_in,ARRAY_BYTES); hipMalloc((void**) &d_out,ARRAY_BYTES); //error check //std::cout<<hipGetErrorString(hipGetLastError())<<std::endl; //copy array from CPU to GPU to preform function on GPU's threads hipMemcpy(d_in,h_in,ARRAY_BYTES,hipMemcpyHostToDevice); hipLaunchKernelGGL(( cube), dim3(1),dim3(ARRAY_SIZE), 0, 0, d_out,d_in); //copy result from function back from GPU to CPU hipMemcpy(h_out,d_out,ARRAY_BYTES,hipMemcpyDeviceToHost); //print result array std::cout<<"\nAfter: \n"; for(int i=0;i<ARRAY_SIZE;i++){ printf("%f", h_out[i]); printf(((i%4)!=3) ? "\t" : "\n"); } //free memory hipFree(d_in); hipFree(d_out); return 0; }
c0a4fadb36c455e6b866b2b6031ed0eec6730dd2.cu
#include<cuda_runtime.h> #include<stdio.h> #include<iostream> //define the multithread action __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f*f*f; } //start main activity int main(int argc,char **argv){ //initilize array specs const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); //initalize array values float h_in[ARRAY_SIZE]; for(int i=0;i<ARRAY_SIZE;i++){ h_in[i]=float(i); } //print array std::cout<<"Before: \n"; for(int i=0;i<ARRAY_SIZE;i++){ printf("%f", h_in[i]); printf(((i%4)!=3) ? "\t" : "\n"); } std::cout<<"\n"; //initlize an array of the same size as our input float h_out[ARRAY_SIZE]; //initalize the inputs to the multithread functiuon float * d_in; float * d_out; //allocate memory for the arrays cudaMalloc((void**) &d_in,ARRAY_BYTES); cudaMalloc((void**) &d_out,ARRAY_BYTES); //error check //std::cout<<cudaGetErrorString(cudaGetLastError())<<std::endl; //copy array from CPU to GPU to preform function on GPU's threads cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice); cube<<<1,ARRAY_SIZE>>>(d_out,d_in); //copy result from function back from GPU to CPU cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost); //print result array std::cout<<"\nAfter: \n"; for(int i=0;i<ARRAY_SIZE;i++){ printf("%f", h_out[i]); printf(((i%4)!=3) ? "\t" : "\n"); } //free memory cudaFree(d_in); cudaFree(d_out); return 0; }
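The cube example above launches a single block of ARRAY_SIZE threads and leaves its error check commented out. Below is a sketch of the same computation written as a grid-stride kernel with the check re-enabled, for inputs that do not fit in one block; gpuCheck and cube_strided are illustrative names, not part of the original pair.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define gpuCheck(call) do { cudaError_t e_ = (call); if (e_ != cudaSuccess) { \
    fprintf(stderr, "CUDA error %s at %s:%d\n", cudaGetErrorString(e_), __FILE__, __LINE__); \
    exit(1); } } while (0)

__global__ void cube_strided(float *d_out, const float *d_in, int n)
{
    // Each thread handles idx, idx + stride, idx + 2*stride, ...
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        float f = d_in[idx];
        d_out[idx] = f * f * f;
    }
}

int main()
{
    const int n = 1 << 20;
    float *d_in, *d_out;
    gpuCheck(cudaMalloc(&d_in,  n * sizeof(float)));
    gpuCheck(cudaMalloc(&d_out, n * sizeof(float)));
    gpuCheck(cudaMemset(d_in, 0, n * sizeof(float)));   // placeholder input

    int block = 256;
    int grid  = (n + block - 1) / block;
    cube_strided<<<grid, block>>>(d_out, d_in, n);
    gpuCheck(cudaGetLastError());            // the check the original leaves commented out
    gpuCheck(cudaDeviceSynchronize());

    gpuCheck(cudaFree(d_in));
    gpuCheck(cudaFree(d_out));
    return 0;
}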
993ba242227b71aff8adab993661281eb99b0372.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__device__ const char *STR = "deNet is ON!\n";
const char STR_LENGTH = 13;

__global__ void deNet()
{
    printf("%c\n", STR[threadIdx.x % STR_LENGTH]);
}

int main(void)
{
    int num_threads = STR_LENGTH;
    int num_blocks = 1;
    hipLaunchKernelGGL(( deNet), dim3(num_blocks),dim3(num_threads), 0, 0, );
    hipDeviceSynchronize();
    return 0;
}
993ba242227b71aff8adab993661281eb99b0372.cu
#include <stdio.h>

__device__ const char *STR = "deNet is ON!\n";
const char STR_LENGTH = 13;

__global__ void deNet()
{
    printf("%c\n", STR[threadIdx.x % STR_LENGTH]);
}

int main(void)
{
    int num_threads = STR_LENGTH;
    int num_blocks = 1;
    deNet<<<num_blocks,num_threads>>>();
    cudaDeviceSynchronize();
    return 0;
}
ca59e1d161fdc66b599404a7c26f9ed8d60b54f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define FALSE 0 #define TRUE 1 __global__ void rand_init(long *a,long seed) { int tid=threadIdx.x+blockDim.x*blockIdx.x; //long a = 100001; a[tid] = seed + tid; } // returns random integer from 1 to lim __device__ int rand1(long *a, int lim) { int tid=threadIdx.x+blockDim.x*blockIdx.x; a[tid] = (a[tid] * 125) % 2796203; return ((a[tid] % lim) + 1); } // returns random integer from 1 to lim (Gerhard's generator) __device__ int rand2(long *a, int lim) { int tid=threadIdx.x+blockDim.x*blockIdx.x; a[tid] = (a[tid] * 32719 + 3) % 32749; return ((a[tid] % lim) + 1); } // returns random integer from 1 to lim (Bill's generator) __device__ int rand3(long *a, int lim) { int tid=threadIdx.x+blockDim.x*blockIdx.x; a[tid] = (((a[tid] * 214013L + 2531011L) >> 16) & 32767); return ((a[tid] % lim) + 1); } __host__ __device__ float lencalc(int *sx, int *sy, float *w, float *xpos, bool axis) { //int cost=0; auto int matchy[N];//,matchx[N]; auto float L[N]; for(int i=0;i<N;i++) { //matchx[sx[i]-1]=i; matchy[sy[i]-1]=i; L[i]=0; xpos[i]=0; } int b,p; float t; for(int i=0;i<N;i++){ if(axis) b=sx[i]-1; else b=sx[N-1-i]-1; p=matchy[b]; xpos[b]=L[p]; t=xpos[b]+w[b]; for(int j=p; j<N && t>L[j] ;j++) L[j]=t; } return L[N-1]; } __device__ void neighbourhood(int *sx,int *sy, int *sxnew, int *synew, long *seed){ int randxi=rand1(seed,N-1),randxj=rand1(seed,N-1); //In 1D gridding, use N-1,N. for 2D gridding use N-1,N-1 int randyi=rand1(seed,N ),randyj=rand1(seed,N ); //for 1D N, 2D N-1 //swap for(int i=0;i<N;i++){ sxnew[i]=sx[i]; synew[i]=sy[i]; } sxnew[randxi]=sx[randxj]; sxnew[randxj]=sx[randxi]; synew[randxi]=sy[randxj]; synew[randxj]=sy[randxi]; } __device__ void newneighbourhood(int *sx,int *sy, int *sxnew, int *synew, long *seed){ int randxi=rand1(seed,N-1),randxj=rand1(seed,N-1); //In 1D gridding, use N-1,N. for 2D gridding use N-1,N-1 int randyi=rand1(seed,N ),randyj=rand1(seed,N ); //for 1D N, 2D N-1 int choice=rand1(seed,3 ); for(int i=0;i<N;i++){ sxnew[i]=sx[i]; synew[i]=sy[i]; } if(choice==1){ //swap sx sxnew[randxi]=sx[randxj]; sxnew[randxj]=sx[randxi]; } else if(choice==2){ synew[randxi]=sy[randxj]; synew[randxj]=sy[randxi]; } else{ sxnew[randxi]=sx[randxj]; sxnew[randxj]=sx[randxi]; synew[randxi]=sy[randxj]; synew[randxj]=sy[randxi]; } } __global__ void annealing(float *w, float *h, int *perms , long *seed, int totalarea, float *returnArea){ int tid=threadIdx.x, uid=blockIdx.x; int index1d=threadIdx.x+blockDim.x*blockIdx.x; __shared__ float minarea[N]; __shared__ int minid[N]; auto int sxA[N], syA[N], sxnewA[N], synewA[N]; //Arrays auto int *sx=sxA, *sy=syA, *sxnew=sxnewA, *synew=synewA; //pointer seperately declared to tackle lvalue error for(int i=0;i<N;i++){ sx[i]=perms[uid*N+i]; sy[i]=perms[tid*N+i]; //use uid, if 1D grid, else 2D grid //perms[uid*N+i]=-sy[i]; } float t0=0.00000001, ts=0.01,T,delta; //dead space %4.868905504 in python, %8.26 in CUDA. 
int accepted=0; float Wid=0,Len=0,Area=0; auto float xpos[N],ypos[N]; float sdWid=0,sdLen=0,sdArea=0; float minsofar=0; Wid=lencalc(sx,sy,w,xpos,FALSE); //X-axis Len=lencalc(sx,sy,h,ypos,TRUE ); //Y-axis Area=Wid*Len; returnArea[index1d]=Area; for(int ancnt=0;ancnt<5000;ancnt++) //20000 { newneighbourhood(sx,sy,sxnew,synew,seed); sdWid=lencalc(sxnew,synew,w,xpos,FALSE); //X-axis sdLen=lencalc(sxnew,synew,h,ypos,TRUE ); //Y-axis sdArea=sdWid*sdLen; auto int *temp; if(sdArea<Area){ temp=sx; sx=sxnew; sxnew=temp; temp=sy; sy=synew; synew=temp; Area=sdArea; accepted=accepted+1; minsofar=Area; } else{ //take risk (uses random fn) float p= float(rand1(seed,N))/float(N); T = 1.0/(t0+ts*accepted); delta=sdArea-Area; if(p<exp(-delta/T)){ temp=sx; sx=sxnew; sxnew=temp; temp=sy; sy=synew; synew=temp; Area=sdArea; accepted=accepted+1; } } } minarea[threadIdx.x]=minsofar; minid[threadIdx.x]=threadIdx.x; for(int ruled=N/2;ruled>0;ruled/=2) if(threadIdx.x<ruled) if(minarea[threadIdx.x]>minarea[threadIdx.x+ruled]){ minarea[threadIdx.x]=minarea[threadIdx.x+ruled]; minid[threadIdx.x]=minid[threadIdx.x+ruled]; } if(threadIdx.x==minid[0]) for(int i=0;i<N;i++){ perms[uid*N+i]=sx[i]; //perms[uid*N+i]=sy[i]; //use uid, if 1D grid, else tid for 2D grid } returnArea[index1d]=minsofar; }
ca59e1d161fdc66b599404a7c26f9ed8d60b54f1.cu
#define FALSE 0 #define TRUE 1 __global__ void rand_init(long *a,long seed) { int tid=threadIdx.x+blockDim.x*blockIdx.x; //long a = 100001; a[tid] = seed + tid; } // returns random integer from 1 to lim __device__ int rand1(long *a, int lim) { int tid=threadIdx.x+blockDim.x*blockIdx.x; a[tid] = (a[tid] * 125) % 2796203; return ((a[tid] % lim) + 1); } // returns random integer from 1 to lim (Gerhard's generator) __device__ int rand2(long *a, int lim) { int tid=threadIdx.x+blockDim.x*blockIdx.x; a[tid] = (a[tid] * 32719 + 3) % 32749; return ((a[tid] % lim) + 1); } // returns random integer from 1 to lim (Bill's generator) __device__ int rand3(long *a, int lim) { int tid=threadIdx.x+blockDim.x*blockIdx.x; a[tid] = (((a[tid] * 214013L + 2531011L) >> 16) & 32767); return ((a[tid] % lim) + 1); } __host__ __device__ float lencalc(int *sx, int *sy, float *w, float *xpos, bool axis) { //int cost=0; auto int matchy[N];//,matchx[N]; auto float L[N]; for(int i=0;i<N;i++) { //matchx[sx[i]-1]=i; matchy[sy[i]-1]=i; L[i]=0; xpos[i]=0; } int b,p; float t; for(int i=0;i<N;i++){ if(axis) b=sx[i]-1; else b=sx[N-1-i]-1; p=matchy[b]; xpos[b]=L[p]; t=xpos[b]+w[b]; for(int j=p; j<N && t>L[j] ;j++) L[j]=t; } return L[N-1]; } __device__ void neighbourhood(int *sx,int *sy, int *sxnew, int *synew, long *seed){ int randxi=rand1(seed,N-1),randxj=rand1(seed,N-1); //In 1D gridding, use N-1,N. for 2D gridding use N-1,N-1 int randyi=rand1(seed,N ),randyj=rand1(seed,N ); //for 1D N, 2D N-1 //swap for(int i=0;i<N;i++){ sxnew[i]=sx[i]; synew[i]=sy[i]; } sxnew[randxi]=sx[randxj]; sxnew[randxj]=sx[randxi]; synew[randxi]=sy[randxj]; synew[randxj]=sy[randxi]; } __device__ void newneighbourhood(int *sx,int *sy, int *sxnew, int *synew, long *seed){ int randxi=rand1(seed,N-1),randxj=rand1(seed,N-1); //In 1D gridding, use N-1,N. for 2D gridding use N-1,N-1 int randyi=rand1(seed,N ),randyj=rand1(seed,N ); //for 1D N, 2D N-1 int choice=rand1(seed,3 ); for(int i=0;i<N;i++){ sxnew[i]=sx[i]; synew[i]=sy[i]; } if(choice==1){ //swap sx sxnew[randxi]=sx[randxj]; sxnew[randxj]=sx[randxi]; } else if(choice==2){ synew[randxi]=sy[randxj]; synew[randxj]=sy[randxi]; } else{ sxnew[randxi]=sx[randxj]; sxnew[randxj]=sx[randxi]; synew[randxi]=sy[randxj]; synew[randxj]=sy[randxi]; } } __global__ void annealing(float *w, float *h, int *perms , long *seed, int totalarea, float *returnArea){ int tid=threadIdx.x, uid=blockIdx.x; int index1d=threadIdx.x+blockDim.x*blockIdx.x; __shared__ float minarea[N]; __shared__ int minid[N]; auto int sxA[N], syA[N], sxnewA[N], synewA[N]; //Arrays auto int *sx=sxA, *sy=syA, *sxnew=sxnewA, *synew=synewA; //pointer seperately declared to tackle lvalue error for(int i=0;i<N;i++){ sx[i]=perms[uid*N+i]; sy[i]=perms[tid*N+i]; //use uid, if 1D grid, else 2D grid //perms[uid*N+i]=-sy[i]; } float t0=0.00000001, ts=0.01,T,delta; //dead space %4.868905504 in python, %8.26 in CUDA. 
int accepted=0; float Wid=0,Len=0,Area=0; auto float xpos[N],ypos[N]; float sdWid=0,sdLen=0,sdArea=0; float minsofar=0; Wid=lencalc(sx,sy,w,xpos,FALSE); //X-axis Len=lencalc(sx,sy,h,ypos,TRUE ); //Y-axis Area=Wid*Len; returnArea[index1d]=Area; for(int ancnt=0;ancnt<5000;ancnt++) //20000 { newneighbourhood(sx,sy,sxnew,synew,seed); sdWid=lencalc(sxnew,synew,w,xpos,FALSE); //X-axis sdLen=lencalc(sxnew,synew,h,ypos,TRUE ); //Y-axis sdArea=sdWid*sdLen; auto int *temp; if(sdArea<Area){ temp=sx; sx=sxnew; sxnew=temp; temp=sy; sy=synew; synew=temp; Area=sdArea; accepted=accepted+1; minsofar=Area; } else{ //take risk (uses random fn) float p= float(rand1(seed,N))/float(N); T = 1.0/(t0+ts*accepted); delta=sdArea-Area; if(p<exp(-delta/T)){ temp=sx; sx=sxnew; sxnew=temp; temp=sy; sy=synew; synew=temp; Area=sdArea; accepted=accepted+1; } } } minarea[threadIdx.x]=minsofar; minid[threadIdx.x]=threadIdx.x; for(int ruled=N/2;ruled>0;ruled/=2) if(threadIdx.x<ruled) if(minarea[threadIdx.x]>minarea[threadIdx.x+ruled]){ minarea[threadIdx.x]=minarea[threadIdx.x+ruled]; minid[threadIdx.x]=minid[threadIdx.x+ruled]; } if(threadIdx.x==minid[0]) for(int i=0;i<N;i++){ perms[uid*N+i]=sx[i]; //perms[uid*N+i]=sy[i]; //use uid, if 1D grid, else tid for 2D grid } returnArea[index1d]=minsofar; }
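The annealing kernel above draws randomness from hand-rolled per-thread LCGs (rand1/rand2/rand3) seeded by rand_init. A commonly used alternative, sketched here as an option rather than a drop-in replacement, is cuRAND's device API with one state per thread; pick_1_to_lim mirrors rand1's "random integer from 1 to lim" contract.

#include <curand_kernel.h>

__global__ void rng_init(curandState *states, unsigned long long seed)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    // Same seed, distinct subsequence per thread -> statistically independent streams.
    curand_init(seed, tid, 0, &states[tid]);
}

__device__ int pick_1_to_lim(curandState *state, int lim)
{
    // curand() yields a 32-bit unsigned integer; reduce it to 1..lim as rand1 does.
    return (int)(curand(state) % (unsigned int)lim) + 1;
}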
8dc9fe907163d220f77732120b043e5369451318.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file subsampling_gpu.cu // @brief Subsampling block implementation (GPU) // @author Andrea Vedaldi // @author Karel Lenc /* Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "subsample.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <iostream> #ifndef ENABLE_GPU #error "subsample_gpu.cu cannot be compiled without GPU support" #endif using namespace vl ; /* ---------------------------------------------------------------- */ /* subsample forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void subsample_gpu_kernel (T* subsampled, const T* data, const int subsampledWidth, const int subsampledHeight, const int subsampledVolume, const int width, const int height, const int strideX, const int strideY, const int padLeft, const int padTop) { int subsampledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (subsampledIndex < subsampledVolume) { /* subsampledIndex = x + y * subsampledWidth + z * (subsampledWidth * subsampledHeight) ; */ int px = subsampledIndex ; int py = px / subsampledWidth ; int pz = py / subsampledHeight ; px %= subsampledWidth ; py %= subsampledHeight ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; data += pz * (width*height) ; T value = 0 ; if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) { value = data[y1 * width + x1] ; } subsampled[subsampledIndex] = value ; } } template<typename T> static vl::Error subsample_forward_gpu(Context & context, T* subsampled, T const* data, size_t width, size_t height, size_t depth, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom) { int subsampledWidth = (width + (padLeft+padRight) - 1)/strideX + 1 ; int subsampledHeight = (height + (padTop+padBottom) - 1)/strideY + 1 ; int subsampledVolume = subsampledWidth * subsampledHeight * depth ; hipLaunchKernelGGL(( subsample_gpu_kernel<T>) , dim3(divideUpwards(subsampledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, subsampled, data, subsampledWidth, subsampledHeight, subsampledVolume, width, height, strideX, strideY, padLeft, padTop); return context.setError(context.getCudaHelper().catchCudaError("subsample_backward_gpu<>: ")) ; } template <> vl::Error vl::impl::subsample_forward<vl::GPU, float>(vl::Context& context, float* subsampled, float const* data, size_t height, size_t width, size_t depth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { vl::Error error ; error = subsample_forward_gpu<float>(context, subsampled, data, height, width, depth, strideY, strideX, padTop, padBottom, padLeft, padRight) ; return context.passError(error, "subsample_forward<GPU,float>: ") ; } /* ---------------------------------------------------------------- */ /* subsampleBackward (GPU) */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void subsampleBackward_gpu_kernel (T* dzdx, const T* dzdy, const int subsampledWidth, const int subsampledHeight, const int dataVolume, const int width, const int height, const int strideX, const int strideY, const int padLeft, const int padTop) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < dataVolume) { int x = index ; int y = x / width ; 
int z = y / height ; x %= width ; y %= height ; dzdy += z * subsampledHeight * subsampledWidth ; int px = (x + padLeft) / strideX ; int py = (y + padTop) / strideY ; if (x == strideX * px - padLeft && y == strideY * py - padTop) { dzdx[index] = dzdy[py * subsampledWidth + px] ; } else { dzdx[index] = 0 ; } } } template<typename T> vl::Error subsample_backward_gpu(vl::Context& context, T* dzdx, T const* dzdy, size_t width, size_t height, size_t depth, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom) { int subsampledWidth = (width + (padLeft+padRight) - 1)/strideX + 1 ; int subsampledHeight = (height + (padTop+padBottom) - 1)/strideY + 1 ; int nthreads = width * height * depth ; hipLaunchKernelGGL(( subsampleBackward_gpu_kernel<T>) , dim3(divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, dzdx, dzdy, subsampledWidth, subsampledHeight, nthreads, width, height, strideX, strideY, padLeft, padTop); return context.setError(context.getCudaHelper().catchCudaError("subsample_backward_gpu<>: ")) ; } template <> vl::Error vl::impl::subsample_backward<vl::GPU, float>(vl::Context& context, float* derData, float const* derSubsampled, size_t height, size_t width, size_t depth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { vl::Error error ; error = subsample_backward_gpu<float>(context, derData, derSubsampled, height, width, depth, strideY, strideX, padTop, padBottom, padLeft, padRight) ; return context.passError(error, "subsample_backward<GPU,float>: ") ; }
8dc9fe907163d220f77732120b043e5369451318.cu
// @file subsampling_gpu.cu // @brief Subsampling block implementation (GPU) // @author Andrea Vedaldi // @author Karel Lenc /* Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "subsample.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <iostream> #ifndef ENABLE_GPU #error "subsample_gpu.cu cannot be compiled without GPU support" #endif using namespace vl ; /* ---------------------------------------------------------------- */ /* subsample forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void subsample_gpu_kernel (T* subsampled, const T* data, const int subsampledWidth, const int subsampledHeight, const int subsampledVolume, const int width, const int height, const int strideX, const int strideY, const int padLeft, const int padTop) { int subsampledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (subsampledIndex < subsampledVolume) { /* subsampledIndex = x + y * subsampledWidth + z * (subsampledWidth * subsampledHeight) ; */ int px = subsampledIndex ; int py = px / subsampledWidth ; int pz = py / subsampledHeight ; px %= subsampledWidth ; py %= subsampledHeight ; int x1 = px * strideX - padLeft ; int y1 = py * strideY - padTop ; data += pz * (width*height) ; T value = 0 ; if (x1 >= 0 && x1 < width && y1 >= 0 && y1 < height) { value = data[y1 * width + x1] ; } subsampled[subsampledIndex] = value ; } } template<typename T> static vl::Error subsample_forward_gpu(Context & context, T* subsampled, T const* data, size_t width, size_t height, size_t depth, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom) { int subsampledWidth = (width + (padLeft+padRight) - 1)/strideX + 1 ; int subsampledHeight = (height + (padTop+padBottom) - 1)/strideY + 1 ; int subsampledVolume = subsampledWidth * subsampledHeight * depth ; subsample_gpu_kernel<T> <<< divideUpwards(subsampledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (subsampled, data, subsampledWidth, subsampledHeight, subsampledVolume, width, height, strideX, strideY, padLeft, padTop); return context.setError(context.getCudaHelper().catchCudaError("subsample_backward_gpu<>: ")) ; } template <> vl::Error vl::impl::subsample_forward<vl::GPU, float>(vl::Context& context, float* subsampled, float const* data, size_t height, size_t width, size_t depth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { vl::Error error ; error = subsample_forward_gpu<float>(context, subsampled, data, height, width, depth, strideY, strideX, padTop, padBottom, padLeft, padRight) ; return context.passError(error, "subsample_forward<GPU,float>: ") ; } /* ---------------------------------------------------------------- */ /* subsampleBackward (GPU) */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void subsampleBackward_gpu_kernel (T* dzdx, const T* dzdy, const int subsampledWidth, const int subsampledHeight, const int dataVolume, const int width, const int height, const int strideX, const int strideY, const int padLeft, const int padTop) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < dataVolume) { int x = index ; int y = x / width ; int z = y / height ; x %= width ; y %= height ; dzdy += z * subsampledHeight * subsampledWidth ; int px = (x + padLeft) / 
strideX ; int py = (y + padTop) / strideY ; if (x == strideX * px - padLeft && y == strideY * py - padTop) { dzdx[index] = dzdy[py * subsampledWidth + px] ; } else { dzdx[index] = 0 ; } } } template<typename T> vl::Error subsample_backward_gpu(vl::Context& context, T* dzdx, T const* dzdy, size_t width, size_t height, size_t depth, size_t strideX, size_t strideY, size_t padLeft, size_t padRight, size_t padTop, size_t padBottom) { int subsampledWidth = (width + (padLeft+padRight) - 1)/strideX + 1 ; int subsampledHeight = (height + (padTop+padBottom) - 1)/strideY + 1 ; int nthreads = width * height * depth ; subsampleBackward_gpu_kernel<T> <<< divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (dzdx, dzdy, subsampledWidth, subsampledHeight, nthreads, width, height, strideX, strideY, padLeft, padTop); return context.setError(context.getCudaHelper().catchCudaError("subsample_backward_gpu<>: ")) ; } template <> vl::Error vl::impl::subsample_backward<vl::GPU, float>(vl::Context& context, float* derData, float const* derSubsampled, size_t height, size_t width, size_t depth, size_t strideY, size_t strideX, size_t padTop, size_t padBottom, size_t padLeft, size_t padRight) { vl::Error error ; error = subsample_backward_gpu<float>(context, derData, derSubsampled, height, width, depth, strideY, strideX, padTop, padBottom, padLeft, padRight) ; return context.passError(error, "subsample_backward<GPU,float>: ") ; }
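Both subsample kernels above derive their geometry from subsampledWidth = (width + padLeft + padRight - 1)/strideX + 1 and size their grids with divideUpwards. The host-side sketch below just walks through that arithmetic for one concrete shape; divideUpwards is assumed to be the usual ceiling division, and the thread-count constant is a placeholder for VL_CUDA_NUM_THREADS.

#include <cstdio>

static int subsampledSize(int len, int padA, int padB, int stride)
{
    return (len + (padA + padB) - 1) / stride + 1;       // same formula as the kernels above
}

static int divideUpwardsSketch(int a, int b)             // assumed to be ceil(a / b)
{
    return (a + b - 1) / b;
}

int main()
{
    const int width = 224, height = 224, depth = 3;
    const int stride = 2, pad = 0;
    const int threadsPerBlock = 512;                     // placeholder for VL_CUDA_NUM_THREADS

    int sw = subsampledSize(width,  pad, pad, stride);   // 112
    int sh = subsampledSize(height, pad, pad, stride);   // 112
    int volume = sw * sh * depth;                        // one thread per output element
    printf("%dx%dx%d output -> %d blocks of %d threads\n",
           sw, sh, depth, divideUpwardsSketch(volume, threadsPerBlock), threadsPerBlock);
    return 0;
}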
b7474467a34795a1537608534bba153798aef2a2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // simple kernel function that adds two vectors __global__ void vect_add(float *a, float *b, int N) { int idx = threadIdx.x; if (idx<N) a[idx] = a[idx] + b[idx]; } // function called from main fortran program extern "C" void kernel_wrapper_(float *a, float *b, int *Np) { float *a_d, *b_d; // declare GPU vector copies int blocks = 1; // uses 1 block of int N = *Np; // N threads on GPU // Allocate memory on GPU hipMalloc( (void **)&a_d, sizeof(float) * N ); hipMalloc( (void **)&b_d, sizeof(float) * N ); // copy vectors from CPU to GPU hipMemcpy( a_d, a, sizeof(float) * N, hipMemcpyHostToDevice ); hipMemcpy( b_d, b, sizeof(float) * N, hipMemcpyHostToDevice ); // call function on GPU hipLaunchKernelGGL(( vect_add), dim3(blocks), dim3(N) , 0, 0, a_d, b_d, N); // copy vectors back from GPU to CPU hipMemcpy( a, a_d, sizeof(float) * N, hipMemcpyDeviceToHost ); hipMemcpy( b, b_d, sizeof(float) * N, hipMemcpyDeviceToHost ); // free GPU memory hipFree(a_d); hipFree(b_d); return; }
b7474467a34795a1537608534bba153798aef2a2.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> // simple kernel function that adds two vectors __global__ void vect_add(float *a, float *b, int N) { int idx = threadIdx.x; if (idx<N) a[idx] = a[idx] + b[idx]; } // function called from main fortran program extern "C" void kernel_wrapper_(float *a, float *b, int *Np) { float *a_d, *b_d; // declare GPU vector copies int blocks = 1; // uses 1 block of int N = *Np; // N threads on GPU // Allocate memory on GPU cudaMalloc( (void **)&a_d, sizeof(float) * N ); cudaMalloc( (void **)&b_d, sizeof(float) * N ); // copy vectors from CPU to GPU cudaMemcpy( a_d, a, sizeof(float) * N, cudaMemcpyHostToDevice ); cudaMemcpy( b_d, b, sizeof(float) * N, cudaMemcpyHostToDevice ); // call function on GPU vect_add<<< blocks, N >>>( a_d, b_d, N); // copy vectors back from GPU to CPU cudaMemcpy( a, a_d, sizeof(float) * N, cudaMemcpyDeviceToHost ); cudaMemcpy( b, b_d, sizeof(float) * N, cudaMemcpyDeviceToHost ); // free GPU memory cudaFree(a_d); cudaFree(b_d); return; }
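kernel_wrapper_ above is shaped for a Fortran caller: extern "C" linkage, a trailing underscore, and N passed by pointer. The short C++ driver below is a sketch (not part of the original pair) of exercising it the same way for testing; it only needs to be linked against the wrapper's object file.

#include <cstdio>

extern "C" void kernel_wrapper_(float *a, float *b, int *Np);

int main()
{
    const int N = 8;
    float a[N], b[N];
    for (int i = 0; i < N; ++i) { a[i] = (float)i; b[i] = 10.0f * i; }

    int n = N;                       // Fortran passes scalars by reference
    kernel_wrapper_(a, b, &n);       // on return, a[i] == original a[i] + b[i]

    for (int i = 0; i < N; ++i) printf("%g ", a[i]);
    printf("\n");
    return 0;
}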
908c0ee00ea45b0c7f05a148e56c67a67bd9292b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"

// This is my DEVICE function
// __global__ means this function is visible to the host
__global__ void kernelHelloWorld() {

  int thread = threadIdx.x; //local thread number in a block
  int block = blockIdx.x; //block number

  printf("Hello World from thread %d of block %d!\n", thread, block);
}

int main(int argc, char** argv) {

  int Nblocks = 10; //number of blocks
  int Nthreads = 3; //number of threads per block

  // run the function 'kernelHelloWorld' on the DEVICE
  hipLaunchKernelGGL(( kernelHelloWorld) , dim3(Nblocks), dim3(Nthreads) , 0, 0, );

  //wait for the DEVICE function to complete before moving on
  hipDeviceSynchronize();

  return 0;
}
908c0ee00ea45b0c7f05a148e56c67a67bd9292b.cu
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"

// This is my DEVICE function
// __global__ means this function is visible to the host
__global__ void kernelHelloWorld() {

  int thread = threadIdx.x; //local thread number in a block
  int block = blockIdx.x; //block number

  printf("Hello World from thread %d of block %d!\n", thread, block);
}

int main(int argc, char** argv) {

  int Nblocks = 10; //number of blocks
  int Nthreads = 3; //number of threads per block

  // run the function 'kernelHelloWorld' on the DEVICE
  kernelHelloWorld <<< Nblocks, Nthreads >>> ();

  //wait for the DEVICE function to complete before moving on
  cudaDeviceSynchronize();

  return 0;
}
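kernelHelloWorld above reports the per-block thread index and the block index separately. When a single flat index across the whole launch is wanted instead, the usual expression is the one sketched below (an illustrative variant, not part of the original file).

#include <cstdio>

__global__ void kernelHelloWorldGlobal()
{
    // Flat index over the 1-D launch: 0 .. gridDim.x*blockDim.x - 1
    int global = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello World from global thread %d!\n", global);
}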
b5d0508dac22e35bbdda13640051d1c22b7da7e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // R-FCN // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_align_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIAlignForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; Dtype roi_width = roi_end_w - roi_start_w; Dtype roi_height = roi_end_h - roi_start_h; Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; Dtype wstart = static_cast<Dtype>(pw)* bin_size_w + roi_start_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w; bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; // Selecting four regular locations for bilinear interpolation for (Dtype h = hstart + bin_size_h / Dtype(4); h < hend; h += bin_size_h / Dtype(2)) { if (h < 0 || h > height - 1) { continue; } for (Dtype w = wstart + bin_size_w / Dtype(4); w < wend; w += bin_size_w / Dtype(2)) { if (w < 0 || w > width - 1) { continue; } int x_left = floor(w); int x_right = ceil(w); if (x_right == x_left) { x_right = x_left + 1; } int y_bottom = floor(h); int y_top = ceil(h); if (y_top == y_bottom) { y_top = y_bottom + 1; } int top_left_index = y_top * width + x_left; int top_right_index = y_top * width + x_right; int bottom_left_index = y_bottom * width + x_left; int bottom_right_index = y_bottom * width + x_right; Dtype val = 0; val += (1 - w + x_left) * (1 - y_top + h) * bottom_data[top_left_index]; val += (1 - x_right + w) * (1 - y_top + h) * bottom_data[top_right_index]; val += (1 - w + x_left) * (1 - h + y_bottom) * bottom_data[bottom_left_index]; val += (1 - x_right + w) * (1 - h + y_bottom) * bottom_data[bottom_right_index]; out_sum += val; } } top_data[index] = is_empty? 0. 
: out_sum / 4; mapping_channel[index] = c; } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIAlignForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIAlignBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; Dtype roi_width = roi_end_w - roi_start_w; Dtype roi_height = roi_end_h - roi_start_h; Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; Dtype wstart = static_cast<Dtype>(pw)* bin_size_w + roi_start_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w; bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype diff_val = is_empty ? 0. 
: top_diff[index] / 4; // Selecting four regular locations for bilinear interpolation for (Dtype h = hstart + bin_size_h / Dtype(4); h < hend; h += bin_size_h / Dtype(2)) { if (h < 0 || h > height - 1) { continue; } for (Dtype w = wstart + bin_size_w / Dtype(4); w < wend; w += bin_size_w / Dtype(2)) { if (w < 0 || w > width - 1) { continue; } int x_left = floor(w); int x_right = ceil(w); if (x_right == x_left) { x_right = x_left + 1; } int y_bottom = floor(h); int y_top = ceil(h); if (y_top == y_bottom) { y_top = y_bottom + 1; } int top_left_index = y_top * width + x_left; int top_right_index = y_top * width + x_right; int bottom_left_index = y_bottom * width + x_left; int bottom_right_index = y_bottom * width + x_right; caffe_gpu_atomic_add(diff_val * (x_right - w) * (h - y_bottom), offset_bottom_diff + top_left_index); caffe_gpu_atomic_add(diff_val * (w - x_left) * (h - y_bottom), offset_bottom_diff + top_right_index); caffe_gpu_atomic_add(diff_val * (x_right - w) * (y_top - h), offset_bottom_diff + bottom_left_index); caffe_gpu_atomic_add(diff_val * (w - x_left) * (y_top - h), offset_bottom_diff + bottom_right_index); } } } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIAlignBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIAlignLayer); } // namespace caffe
b5d0508dac22e35bbdda13640051d1c22b7da7e0.cu
// -------------------------------------------------------- // R-FCN // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_align_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIAlignForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; Dtype roi_width = roi_end_w - roi_start_w; Dtype roi_height = roi_end_h - roi_start_h; Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; Dtype wstart = static_cast<Dtype>(pw)* bin_size_w + roi_start_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w; bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype out_sum = 0; // Selecting four regular locations for bilinear interpolation for (Dtype h = hstart + bin_size_h / Dtype(4); h < hend; h += bin_size_h / Dtype(2)) { if (h < 0 || h > height - 1) { continue; } for (Dtype w = wstart + bin_size_w / Dtype(4); w < wend; w += bin_size_w / Dtype(2)) { if (w < 0 || w > width - 1) { continue; } int x_left = floor(w); int x_right = ceil(w); if (x_right == x_left) { x_right = x_left + 1; } int y_bottom = floor(h); int y_top = ceil(h); if (y_top == y_bottom) { y_top = y_bottom + 1; } int top_left_index = y_top * width + x_left; int top_right_index = y_top * width + x_right; int bottom_left_index = y_bottom * width + x_left; int bottom_right_index = y_bottom * width + x_right; Dtype val = 0; val += (1 - w + x_left) * (1 - y_top + h) * bottom_data[top_left_index]; val += (1 - x_right + w) * (1 - y_top + h) * bottom_data[top_right_index]; val += (1 - w + x_left) * (1 - h + y_bottom) * bottom_data[bottom_left_index]; val += (1 - x_right + w) * (1 - h + y_bottom) * bottom_data[bottom_right_index]; out_sum += val; } } top_data[index] = is_empty? 0. 
: out_sum / 4; mapping_channel[index] = c; } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIAlignForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIAlignBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; Dtype roi_width = roi_end_w - roi_start_w; Dtype roi_height = roi_end_h - roi_start_h; Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h + roi_start_h; Dtype wstart = static_cast<Dtype>(pw)* bin_size_w + roi_start_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h + roi_start_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w + roi_start_w; bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; Dtype diff_val = is_empty ? 0. 
: top_diff[index] / 4; // Selecting four regular locations for bilinear interpolation for (Dtype h = hstart + bin_size_h / Dtype(4); h < hend; h += bin_size_h / Dtype(2)) { if (h < 0 || h > height - 1) { continue; } for (Dtype w = wstart + bin_size_w / Dtype(4); w < wend; w += bin_size_w / Dtype(2)) { if (w < 0 || w > width - 1) { continue; } int x_left = floor(w); int x_right = ceil(w); if (x_right == x_left) { x_right = x_left + 1; } int y_bottom = floor(h); int y_top = ceil(h); if (y_top == y_bottom) { y_top = y_bottom + 1; } int top_left_index = y_top * width + x_left; int top_right_index = y_top * width + x_right; int bottom_left_index = y_bottom * width + x_left; int bottom_right_index = y_bottom * width + x_right; caffe_gpu_atomic_add(diff_val * (x_right - w) * (h - y_bottom), offset_bottom_diff + top_left_index); caffe_gpu_atomic_add(diff_val * (w - x_left) * (h - y_bottom), offset_bottom_diff + top_right_index); caffe_gpu_atomic_add(diff_val * (x_right - w) * (y_top - h), offset_bottom_diff + bottom_left_index); caffe_gpu_atomic_add(diff_val * (w - x_left) * (y_top - h), offset_bottom_diff + bottom_right_index); } } } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIAlignBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIAlignLayer); } // namespace caffe
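PSROIAlignForward above interpolates each of its four sample points from the surrounding texels with weights (1 - w + x_left), (1 - x_right + w), and so on. The device helper below is a standalone sketch of that same bilinear weighting for a single height x width channel slice; boundary clamping and the backward pass's atomic scatter are deliberately left out.

template <typename Dtype>
__device__ Dtype bilinear_sample(const Dtype *data, int width, Dtype w, Dtype h)
{
    // Corner indices, matching the kernel's floor/ceil pairing (y_top is the larger row index).
    int x_left   = (int)floor(w);
    int x_right  = x_left + 1;
    int y_bottom = (int)floor(h);
    int y_top    = y_bottom + 1;

    Dtype lx = w - x_left;      // fractional offset toward x_right
    Dtype ly = h - y_bottom;    // fractional offset toward y_top

    Dtype val = 0;
    val += (1 - lx) * ly       * data[y_top    * width + x_left ];   // top-left
    val += lx       * ly       * data[y_top    * width + x_right];   // top-right
    val += (1 - lx) * (1 - ly) * data[y_bottom * width + x_left ];   // bottom-left
    val += lx       * (1 - ly) * data[y_bottom * width + x_right];   // bottom-right
    return val;
}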
f54a65df42feda44a835770df04fdc107c527005.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <cusolverDn.h> #include <hip/hip_complex.h> #include "parameters.h" #include "device_funcs.h" void smith_par_getAs (int n, int q, hipComplex *x_answer); void get_power_series (hipDoubleComplex *power_series, int q); void fill_r_matrix (hipDoubleComplex *power_series, hipDoubleComplex **rMatrix, int q); void get_normalized_hermite_coefficients (hipDoubleComplex **matrix, int n, double scaling, char type); int linearSolverLU (hipsolverDnHandle_t handle, int n, const hipDoubleComplex *Acopy, int lda, const hipDoubleComplex *b, hipDoubleComplex *x); /* int main() { int n,q; //Request number of A's to find printf("q (# of A terms): "); scanf("%d", &q); //Request a degree for the normalized hermite polynomial printf("n (degree of Hermite moment being closed): "); scanf("%d", &n); printf("\n"); hipComplex *x_answer = (hipComplex*) malloc(q*sizeof(hipComplex)); smith_par_getAs(n,q, x_answer); for (int i = 0; i < q; i++) { printf("A_n-%d: %f + %fi\n", i+1, cuCrealf(x_answer[i]),cuCimagf(x_answer[i])); } free(x_answer); return 0; } */ /* Pade Approximant to find A coefficients * Calculation can be found on pg. 37-38 of Smith's thesis * n is the degree of the Hermite moment being closed * q is the number of coefficients used for the nth moment closure * x_answer stores the final result */ void smith_par_getAs(int n, int q, hipComplex *x_answer) { int i, j, k; // Create matrices for r, P coefficients, Q coefficients, P, Q, and the final LHS matrix hipDoubleComplex **rMatrix = (hipDoubleComplex **) malloc(q * sizeof(hipDoubleComplex *)); hipDoubleComplex **PCoefficients = (hipDoubleComplex **) malloc((n+1) * sizeof(hipDoubleComplex *)); hipDoubleComplex **QCoefficients = (hipDoubleComplex **) malloc((n+1) * sizeof(hipDoubleComplex *)); hipDoubleComplex **PMatrix = (hipDoubleComplex **) malloc(q * sizeof(hipDoubleComplex *)); hipDoubleComplex **QMatrix = (hipDoubleComplex **) malloc(q * sizeof(hipDoubleComplex *)); hipDoubleComplex **lhsMatrix = (hipDoubleComplex **) malloc(q * sizeof(hipDoubleComplex *)); for (i = 0; i < q; i++) { rMatrix[i] = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); PCoefficients[i] = (hipDoubleComplex *) calloc(i+1, sizeof(hipDoubleComplex)); QCoefficients[i] = (hipDoubleComplex *) calloc(i+1, sizeof(hipDoubleComplex)); PMatrix[i] = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); QMatrix[i] = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); lhsMatrix[i] = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); } for (i = q; i <= n; i++) { PCoefficients[i] = (hipDoubleComplex *) calloc(i+1, sizeof(hipDoubleComplex)); QCoefficients[i] = (hipDoubleComplex *) calloc(i+1, sizeof(hipDoubleComplex)); } // Create Pn and Qn vectors hipDoubleComplex *Pn = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); hipDoubleComplex *Qn = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); // Create lhsVector and rhsVector (lhsVector is the final LHS matrix as a single vector; // rhsVector is the b vector in Ax = b) hipDoubleComplex *lhsVector = (hipDoubleComplex *) calloc(q*q, sizeof(hipDoubleComplex)); hipDoubleComplex *rhsVector = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); //Create power_series array hipDoubleComplex *power_series = (hipDoubleComplex *) calloc(q, sizeof(hipDoubleComplex)); hipDoubleComplex I = make_cuDoubleComplex(0., 1.); // Fill power_series 
array and rMatrix get_power_series(power_series, q); fill_r_matrix(power_series, rMatrix, q); // Get P and Q coefficients get_normalized_hermite_coefficients(PCoefficients, n, 1/sqrt(2), 'P'); get_normalized_hermite_coefficients(QCoefficients, n, 1/sqrt(2), 'Q'); //Fill P and Q matrices and vectors int col = 0; // column of P and Q matrix filled int d; // degree of polynomial filling each column for(d = n; d >= n - q; d--){ if (d != n) { for (i = 0; i < q; i++) { // Only filling indices where the degree of the term is less than // or equal to the degree of the corresponding polynomial if (i <= d) { PMatrix[i][col] = PCoefficients[d][i]; QMatrix[i][col] = QCoefficients[d][i]; } } col++; } else { for (i = 0; i < q; i++) { Pn[i] = PCoefficients[n][i]; Qn[i] = QCoefficients[n][i]; } } } // Freeing PCoefficients and QCoefficients for (i = 0; i <= n; i++) { free(PCoefficients[i]); free(QCoefficients[i]); } free(PCoefficients); free(QCoefficients); // Get LHS matrix hipDoubleComplex sum; for(i = 0; i < q; i++){ for(j = 0; j < q; j++){ sum.x = 0; sum.y = 0; for(k = 0; k < q; k++){ sum = cuCadd(sum, cuCmul(rMatrix[i][k], PMatrix[k][j])); } lhsMatrix[i][j] = cuCsub(sum, cuCmul(I,QMatrix[i][j])); } } //Transpose of lhsMatrix and putting into one vector for cuSolver/cuBlas call below for(i = 0; i < q; i++){ for(j = 0; j < q; j++){ lhsVector[j + q*i] = lhsMatrix[j][i]; } } // Get RHS vector for(j = 0; j < q; j++){ sum.x = 0; sum.y = 0; for(k = 0; k < q; k++){ sum = cuCadd(sum, cuCmul(rMatrix[j][k],Pn[k])); } rhsVector[j] = cuCsub(sum, cuCmul(I,Qn[j])); } // Creating CUDA array copy of lhsVector hipDoubleComplex * lhsVector_d = nullptr; hipMalloc(&lhsVector_d, q*q*sizeof(hipDoubleComplex)); // hipMemcpy(lhsVector_d, (hipDoubleComplex*) lhsVector, q*q*sizeof(hipDoubleComplex), hipMemcpyHostToDevice); CP_TO_GPU (lhsVector_d, (hipDoubleComplex*) lhsVector, q*q*sizeof(hipDoubleComplex)); // Creating CUDA array copy of rhsVector hipDoubleComplex *rhsVector_d = nullptr; hipMalloc(&rhsVector_d, q*sizeof(hipDoubleComplex)); // hipMemcpy(rhsVector_d, (hipDoubleComplex*) rhsVector, q*sizeof(hipDoubleComplex), hipMemcpyHostToDevice); CP_TO_GPU (rhsVector_d, (hipDoubleComplex*) rhsVector, q*sizeof(hipDoubleComplex)); // Setting up CUDA handlers and stream hipsolverDnHandle_t handle = NULL; hipblasHandle_t cublasHandle = NULL; hipStream_t stream = NULL; hipsolverDnCreate(&handle); hipblasCreate(&cublasHandle); hipStreamCreate(&stream); hipsolverDnSetStream(handle, stream); hipblasSetStream(cublasHandle, stream); // Calling cuSolver and cuBLAS libraries to solve Ax = b linearSolverLU(handle, q, lhsVector_d, q, rhsVector_d, rhsVector_d); // Converting hipDoubleComplex answer to hipComplex and storing it in x_answer hipComplex * rhsVector_d_float = nullptr; hipMalloc(&rhsVector_d_float, q*q*sizeof(hipComplex)); hipLaunchKernelGGL(( castDoubleToFloat), dim3(1),dim3(1), 0, 0, rhsVector_d, rhsVector_d_float, q); CP_TO_CPU (x_answer, rhsVector_d_float, sizeof(hipComplex)*q); // Print only if debugging if (0==1) { // Print r values printf("r coefficients\n"); for (i = 0; i < q; i++) printf("r_%d: %f + i%f\n", i, power_series[i].x,power_series[i].y); // Print r matrix printf("\nMatrix r\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.2f + %2.2fi ",rMatrix[i][j].x,rMatrix[i][j].y); } printf("\n"); } printf("\n"); // Print P matrix printf("Matrix P\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.4f ",PMatrix[i][j].x); } printf("\n"); } printf("\n"); // Print Q Matrix printf("Matrix 
Q\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.4f ",QMatrix[i][j].x); } printf("\n"); } printf("\n"); // Print final LHS matrix and RHS vector for Ax = b printf("Before\n"); printf("LHS Matrix\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.2f + %2.2fi ",lhsMatrix[i][j].x,lhsMatrix[i][j].y); } printf("\n"); } printf("\nRHS Vector\n"); for (i = 0; i < q; i++) printf("%2.4f + %2.4fi\n",rhsVector[i].x,rhsVector[i].y); printf("\nAfter\n"); for (int i = 0; i < q; i++) printf("A_n-%d: %f + %fi\n", i+1, cuCrealf(x_answer[i]),cuCimagf(x_answer[i])); } // Freeing dynamically allocated arrays and matrices for (i = 0; i < q; i++) { free(rMatrix[i]); free(PMatrix[i]); free(QMatrix[i]); free(lhsMatrix[i]); } free(power_series); free(rMatrix); free(PMatrix); free(QMatrix); free(lhsMatrix); free(Pn); free(Qn); free(lhsVector); if (lhsVector_d) hipFree(lhsVector_d); if (rhsVector_d) hipFree(rhsVector_d); if (rhsVector_d_float) hipFree(rhsVector_d_float); } /* This function finds the coefficients of the Taylor series expansion of * R00, which is (-1/sqrt(2))*Z(w/sqrt(2)). */ //void get_power_series(double complex *power_series, int q) { void get_power_series(hipDoubleComplex *power_series, int q) { int j; hipDoubleComplex twoI = make_cuDoubleComplex(0., 2.); hipDoubleComplex fac = make_cuDoubleComplex(1., 0.); hipDoubleComplex fac2 = make_cuDoubleComplex(sqrt(2.), 0.); hipDoubleComplex root2= make_cuDoubleComplex(sqrt(2.), 0.); hipDoubleComplex res; for (j = 0; j < q; j++) { // Below definition comes from Smith's thesis on pg. 13 res = cuCdiv(fac, fac2); // Alternate definition of above formula, works better for large number of coefficients power_series[j].x = res.x * sqrt(M_PI)*pow(2, -j)/tgamma(j/2.0 +1); power_series[j].y = res.y * sqrt(M_PI)*pow(2, -j)/tgamma(j/2.0 +1); fac = cuCmul(fac, twoI); fac2 = cuCmul(fac2, root2); } } /* This function fills the rMatrix with the power series coefficients found in get_power_series */ void fill_r_matrix(hipDoubleComplex *power_series, hipDoubleComplex **rMatrix, int q) { int i,j; for (j = 0; j < q; j++) { for (i = j; i < q; i++) { rMatrix[i][j].x = power_series[i-j].x; rMatrix[i][j].y = power_series[i-j].y; } } } /* Fills matrix with coefficients of the first n Hermite polynomial with specified scaling * and type (P for regular normalized Hermite, Q for conjugate polynomials) * Generates physicists' Hermite polynomials from 0 to n of form: 1/sqrt(j!2^j)H_j(scaling*x) */ void get_normalized_hermite_coefficients(hipDoubleComplex **matrix, int n, double scaling, char type) { int i,j; if (type == 'P') { matrix[0][0].x = 1; matrix[0][0].y = 0; matrix[1][0].x = 0; matrix[1][0].y = 0; matrix[1][1].x = sqrt(2)*scaling; matrix[1][1].y = 0; } else if (type == 'Q') { matrix[0][0].x = 0; matrix[0][0].y = 0; matrix[1][0].x = sqrt(2)*scaling; matrix[1][0].y = 0; matrix[1][1].x = 0; matrix[1][1].y = 0; } /* Calculation using standard recurrence relation of normalized Hermite polynomials: * h_i(x) = sqrt(2/i)*x*h_(i-1)(x) - sqrt((i-1)/i)*h_(i-2)(x) */ for (i = 2; i <= n; i++) { for (j = 0; j <= i; j++){ double di = (double) i; if (j == 0) { matrix[i][0].x = -sqrt((di -1)/di)*matrix[i-2][0].x; matrix[i][0].y = -sqrt((di -1)/di)*matrix[i-2][0].y; } else { if (i-2 >= j) matrix[i][j].x += -sqrt((di -1)/di) * matrix[i-2][j].x; if (i-2 >= j) matrix[i][j].y += -sqrt((di -1)/di) * matrix[i-2][j].y; if (i-1 >= j-1) matrix[i][j].x += sqrt(2/di) * scaling * matrix[i-1][j-1].x; if (i-1 >= j-1) matrix[i][j].y += sqrt(2/di) * scaling * 
matrix[i-1][j-1].y; } } } } /* Solve Ax = b by LU decomposition with partial pivoting */ int linearSolverLU(hipsolverDnHandle_t handle, int n, const hipDoubleComplex *Acopy, int lda, const hipDoubleComplex *b, hipDoubleComplex *x) { int bufferSize = 0; int *info = nullptr; hipDoubleComplex *buffer = nullptr; hipDoubleComplex *A = nullptr; int *ipiv = nullptr; // pivoting sequence int h_info = 0; hipsolverDnZgetrf_bufferSize(handle, n, n, (hipDoubleComplex*)Acopy, lda, &bufferSize); hipMalloc(&info, sizeof(int)); hipMalloc(&buffer, sizeof(hipDoubleComplex)*bufferSize); hipMalloc(&A, sizeof(hipDoubleComplex)*lda*n); hipMalloc(&ipiv, sizeof(int)*n); // Prepare a copy of A because getrf will overwrite A with L CP_TO_GPU(A, Acopy, sizeof(hipDoubleComplex)*lda*n); hipMemset(info, 0, sizeof(int)); hipsolverDnZgetrf(handle, n, n, A, lda, buffer, ipiv, info); CP_TO_CPU(&h_info, info, sizeof(int)); if ( 0 != h_info ){ fprintf(stderr, "Error: LU factorization failed\n"); } CP_ON_GPU(x, b, sizeof(hipDoubleComplex)*n); hipsolverDnZgetrs(handle, HIPBLAS_OP_N, n, 1, A, lda, ipiv, x, n, info); hipDeviceSynchronize(); if (info ) { hipFree(info ); } if (buffer) { hipFree(buffer); } if (A ) { hipFree(A); } if (ipiv ) { hipFree(ipiv);} return 0; }
f54a65df42feda44a835770df04fdc107c527005.cu
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cusolverDn.h> #include <cuComplex.h> #include "parameters.h" #include "device_funcs.h" void smith_par_getAs (int n, int q, cuComplex *x_answer); void get_power_series (cuDoubleComplex *power_series, int q); void fill_r_matrix (cuDoubleComplex *power_series, cuDoubleComplex **rMatrix, int q); void get_normalized_hermite_coefficients (cuDoubleComplex **matrix, int n, double scaling, char type); int linearSolverLU (cusolverDnHandle_t handle, int n, const cuDoubleComplex *Acopy, int lda, const cuDoubleComplex *b, cuDoubleComplex *x); /* int main() { int n,q; //Request number of A's to find printf("q (# of A terms): "); scanf("%d", &q); //Request a degree for the normalized hermite polynomial printf("n (degree of Hermite moment being closed): "); scanf("%d", &n); printf("\n"); cuComplex *x_answer = (cuComplex*) malloc(q*sizeof(cuComplex)); smith_par_getAs(n,q, x_answer); for (int i = 0; i < q; i++) { printf("A_n-%d: %f + %fi\n", i+1, cuCrealf(x_answer[i]),cuCimagf(x_answer[i])); } free(x_answer); return 0; } */ /* Pade Approximant to find A coefficients * Calculation can be found on pg. 37-38 of Smith's thesis * n is the degree of the Hermite moment being closed * q is the number of coefficients used for the nth moment closure * x_answer stores the final result */ void smith_par_getAs(int n, int q, cuComplex *x_answer) { int i, j, k; // Create matrices for r, P coefficients, Q coefficients, P, Q, and the final LHS matrix cuDoubleComplex **rMatrix = (cuDoubleComplex **) malloc(q * sizeof(cuDoubleComplex *)); cuDoubleComplex **PCoefficients = (cuDoubleComplex **) malloc((n+1) * sizeof(cuDoubleComplex *)); cuDoubleComplex **QCoefficients = (cuDoubleComplex **) malloc((n+1) * sizeof(cuDoubleComplex *)); cuDoubleComplex **PMatrix = (cuDoubleComplex **) malloc(q * sizeof(cuDoubleComplex *)); cuDoubleComplex **QMatrix = (cuDoubleComplex **) malloc(q * sizeof(cuDoubleComplex *)); cuDoubleComplex **lhsMatrix = (cuDoubleComplex **) malloc(q * sizeof(cuDoubleComplex *)); for (i = 0; i < q; i++) { rMatrix[i] = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); PCoefficients[i] = (cuDoubleComplex *) calloc(i+1, sizeof(cuDoubleComplex)); QCoefficients[i] = (cuDoubleComplex *) calloc(i+1, sizeof(cuDoubleComplex)); PMatrix[i] = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); QMatrix[i] = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); lhsMatrix[i] = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); } for (i = q; i <= n; i++) { PCoefficients[i] = (cuDoubleComplex *) calloc(i+1, sizeof(cuDoubleComplex)); QCoefficients[i] = (cuDoubleComplex *) calloc(i+1, sizeof(cuDoubleComplex)); } // Create Pn and Qn vectors cuDoubleComplex *Pn = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); cuDoubleComplex *Qn = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); // Create lhsVector and rhsVector (lhsVector is the final LHS matrix as a single vector; // rhsVector is the b vector in Ax = b) cuDoubleComplex *lhsVector = (cuDoubleComplex *) calloc(q*q, sizeof(cuDoubleComplex)); cuDoubleComplex *rhsVector = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); //Create power_series array cuDoubleComplex *power_series = (cuDoubleComplex *) calloc(q, sizeof(cuDoubleComplex)); cuDoubleComplex I = make_cuDoubleComplex(0., 1.); // Fill power_series array and rMatrix get_power_series(power_series, q); fill_r_matrix(power_series, rMatrix, q); // Get P and Q coefficients 
get_normalized_hermite_coefficients(PCoefficients, n, 1/sqrt(2), 'P'); get_normalized_hermite_coefficients(QCoefficients, n, 1/sqrt(2), 'Q'); //Fill P and Q matrices and vectors int col = 0; // column of P and Q matrix filled int d; // degree of polynomial filling each column for(d = n; d >= n - q; d--){ if (d != n) { for (i = 0; i < q; i++) { // Only filling indices where the degree of the term is less than // or equal to the degree of the corresponding polynomial if (i <= d) { PMatrix[i][col] = PCoefficients[d][i]; QMatrix[i][col] = QCoefficients[d][i]; } } col++; } else { for (i = 0; i < q; i++) { Pn[i] = PCoefficients[n][i]; Qn[i] = QCoefficients[n][i]; } } } // Freeing PCoefficients and QCoefficients for (i = 0; i <= n; i++) { free(PCoefficients[i]); free(QCoefficients[i]); } free(PCoefficients); free(QCoefficients); // Get LHS matrix cuDoubleComplex sum; for(i = 0; i < q; i++){ for(j = 0; j < q; j++){ sum.x = 0; sum.y = 0; for(k = 0; k < q; k++){ sum = cuCadd(sum, cuCmul(rMatrix[i][k], PMatrix[k][j])); } lhsMatrix[i][j] = cuCsub(sum, cuCmul(I,QMatrix[i][j])); } } //Transpose of lhsMatrix and putting into one vector for cuSolver/cuBlas call below for(i = 0; i < q; i++){ for(j = 0; j < q; j++){ lhsVector[j + q*i] = lhsMatrix[j][i]; } } // Get RHS vector for(j = 0; j < q; j++){ sum.x = 0; sum.y = 0; for(k = 0; k < q; k++){ sum = cuCadd(sum, cuCmul(rMatrix[j][k],Pn[k])); } rhsVector[j] = cuCsub(sum, cuCmul(I,Qn[j])); } // Creating CUDA array copy of lhsVector cuDoubleComplex * lhsVector_d = nullptr; cudaMalloc(&lhsVector_d, q*q*sizeof(cuDoubleComplex)); // cudaMemcpy(lhsVector_d, (cuDoubleComplex*) lhsVector, q*q*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); CP_TO_GPU (lhsVector_d, (cuDoubleComplex*) lhsVector, q*q*sizeof(cuDoubleComplex)); // Creating CUDA array copy of rhsVector cuDoubleComplex *rhsVector_d = nullptr; cudaMalloc(&rhsVector_d, q*sizeof(cuDoubleComplex)); // cudaMemcpy(rhsVector_d, (cuDoubleComplex*) rhsVector, q*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice); CP_TO_GPU (rhsVector_d, (cuDoubleComplex*) rhsVector, q*sizeof(cuDoubleComplex)); // Setting up CUDA handlers and stream cusolverDnHandle_t handle = NULL; cublasHandle_t cublasHandle = NULL; cudaStream_t stream = NULL; cusolverDnCreate(&handle); cublasCreate(&cublasHandle); cudaStreamCreate(&stream); cusolverDnSetStream(handle, stream); cublasSetStream(cublasHandle, stream); // Calling cuSolver and cuBLAS libraries to solve Ax = b linearSolverLU(handle, q, lhsVector_d, q, rhsVector_d, rhsVector_d); // Converting cuDoubleComplex answer to cuComplex and storing it in x_answer cuComplex * rhsVector_d_float = nullptr; cudaMalloc(&rhsVector_d_float, q*q*sizeof(cuComplex)); castDoubleToFloat<<<1,1>>>(rhsVector_d, rhsVector_d_float, q); CP_TO_CPU (x_answer, rhsVector_d_float, sizeof(cuComplex)*q); // Print only if debugging if (0==1) { // Print r values printf("r coefficients\n"); for (i = 0; i < q; i++) printf("r_%d: %f + i%f\n", i, power_series[i].x,power_series[i].y); // Print r matrix printf("\nMatrix r\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.2f + %2.2fi ",rMatrix[i][j].x,rMatrix[i][j].y); } printf("\n"); } printf("\n"); // Print P matrix printf("Matrix P\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.4f ",PMatrix[i][j].x); } printf("\n"); } printf("\n"); // Print Q Matrix printf("Matrix Q\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.4f ",QMatrix[i][j].x); } printf("\n"); } printf("\n"); // Print final LHS matrix and RHS vector for Ax = b 
printf("Before\n"); printf("LHS Matrix\n"); for (i = 0; i < q; i++) { for (j = 0; j < q; j++) { printf("%2.2f + %2.2fi ",lhsMatrix[i][j].x,lhsMatrix[i][j].y); } printf("\n"); } printf("\nRHS Vector\n"); for (i = 0; i < q; i++) printf("%2.4f + %2.4fi\n",rhsVector[i].x,rhsVector[i].y); printf("\nAfter\n"); for (int i = 0; i < q; i++) printf("A_n-%d: %f + %fi\n", i+1, cuCrealf(x_answer[i]),cuCimagf(x_answer[i])); } // Freeing dynamically allocated arrays and matrices for (i = 0; i < q; i++) { free(rMatrix[i]); free(PMatrix[i]); free(QMatrix[i]); free(lhsMatrix[i]); } free(power_series); free(rMatrix); free(PMatrix); free(QMatrix); free(lhsMatrix); free(Pn); free(Qn); free(lhsVector); if (lhsVector_d) cudaFree(lhsVector_d); if (rhsVector_d) cudaFree(rhsVector_d); if (rhsVector_d_float) cudaFree(rhsVector_d_float); } /* This function finds the coefficients of the Taylor series expansion of * R00, which is (-1/sqrt(2))*Z(w/sqrt(2)). */ //void get_power_series(double complex *power_series, int q) { void get_power_series(cuDoubleComplex *power_series, int q) { int j; cuDoubleComplex twoI = make_cuDoubleComplex(0., 2.); cuDoubleComplex fac = make_cuDoubleComplex(1., 0.); cuDoubleComplex fac2 = make_cuDoubleComplex(sqrt(2.), 0.); cuDoubleComplex root2= make_cuDoubleComplex(sqrt(2.), 0.); cuDoubleComplex res; for (j = 0; j < q; j++) { // Below definition comes from Smith's thesis on pg. 13 res = cuCdiv(fac, fac2); // Alternate definition of above formula, works better for large number of coefficients power_series[j].x = res.x * sqrt(M_PI)*pow(2, -j)/tgamma(j/2.0 +1); power_series[j].y = res.y * sqrt(M_PI)*pow(2, -j)/tgamma(j/2.0 +1); fac = cuCmul(fac, twoI); fac2 = cuCmul(fac2, root2); } } /* This function fills the rMatrix with the power series coefficients found in get_power_series */ void fill_r_matrix(cuDoubleComplex *power_series, cuDoubleComplex **rMatrix, int q) { int i,j; for (j = 0; j < q; j++) { for (i = j; i < q; i++) { rMatrix[i][j].x = power_series[i-j].x; rMatrix[i][j].y = power_series[i-j].y; } } } /* Fills matrix with coefficients of the first n Hermite polynomial with specified scaling * and type (P for regular normalized Hermite, Q for conjugate polynomials) * Generates physicists' Hermite polynomials from 0 to n of form: 1/sqrt(j!2^j)H_j(scaling*x) */ void get_normalized_hermite_coefficients(cuDoubleComplex **matrix, int n, double scaling, char type) { int i,j; if (type == 'P') { matrix[0][0].x = 1; matrix[0][0].y = 0; matrix[1][0].x = 0; matrix[1][0].y = 0; matrix[1][1].x = sqrt(2)*scaling; matrix[1][1].y = 0; } else if (type == 'Q') { matrix[0][0].x = 0; matrix[0][0].y = 0; matrix[1][0].x = sqrt(2)*scaling; matrix[1][0].y = 0; matrix[1][1].x = 0; matrix[1][1].y = 0; } /* Calculation using standard recurrence relation of normalized Hermite polynomials: * h_i(x) = sqrt(2/i)*x*h_(i-1)(x) - sqrt((i-1)/i)*h_(i-2)(x) */ for (i = 2; i <= n; i++) { for (j = 0; j <= i; j++){ double di = (double) i; if (j == 0) { matrix[i][0].x = -sqrt((di -1)/di)*matrix[i-2][0].x; matrix[i][0].y = -sqrt((di -1)/di)*matrix[i-2][0].y; } else { if (i-2 >= j) matrix[i][j].x += -sqrt((di -1)/di) * matrix[i-2][j].x; if (i-2 >= j) matrix[i][j].y += -sqrt((di -1)/di) * matrix[i-2][j].y; if (i-1 >= j-1) matrix[i][j].x += sqrt(2/di) * scaling * matrix[i-1][j-1].x; if (i-1 >= j-1) matrix[i][j].y += sqrt(2/di) * scaling * matrix[i-1][j-1].y; } } } } /* Solve Ax = b by LU decomposition with partial pivoting */ int linearSolverLU(cusolverDnHandle_t handle, int n, const cuDoubleComplex *Acopy, int lda, const 
cuDoubleComplex *b, cuDoubleComplex *x) { int bufferSize = 0; int *info = nullptr; cuDoubleComplex *buffer = nullptr; cuDoubleComplex *A = nullptr; int *ipiv = nullptr; // pivoting sequence int h_info = 0; cusolverDnZgetrf_bufferSize(handle, n, n, (cuDoubleComplex*)Acopy, lda, &bufferSize); cudaMalloc(&info, sizeof(int)); cudaMalloc(&buffer, sizeof(cuDoubleComplex)*bufferSize); cudaMalloc(&A, sizeof(cuDoubleComplex)*lda*n); cudaMalloc(&ipiv, sizeof(int)*n); // Prepare a copy of A because getrf will overwrite A with L CP_TO_GPU(A, Acopy, sizeof(cuDoubleComplex)*lda*n); cudaMemset(info, 0, sizeof(int)); cusolverDnZgetrf(handle, n, n, A, lda, buffer, ipiv, info); CP_TO_CPU(&h_info, info, sizeof(int)); if ( 0 != h_info ){ fprintf(stderr, "Error: LU factorization failed\n"); } CP_ON_GPU(x, b, sizeof(cuDoubleComplex)*n); cusolverDnZgetrs(handle, CUBLAS_OP_N, n, 1, A, lda, ipiv, x, n, info); cudaDeviceSynchronize(); if (info ) { cudaFree(info ); } if (buffer) { cudaFree(buffer); } if (A ) { cudaFree(A); } if (ipiv ) { cudaFree(ipiv);} return 0; }
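The pair above shows the standard hipify rewrites for a cuSOLVER/cuBLAS host program: cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, cublas*/cusolverDn* handles become hipblas*/hipsolverDn*, CUBLAS_OP_N becomes HIPBLAS_OP_N, and every triple-chevron kernel launch is rewritten as hipLaunchKernelGGL. A minimal sketch of that launch translation follows; the kernel scaleKernel and the names gridSize, blockSize, d_v, n are illustrative assumptions, not part of the files above.

// Hypothetical kernel used only to illustrate the launch rewrite seen in this pair.
__global__ void scaleKernel(float *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index
    if (i < n) v[i] *= s;                           // bounds guard
}
// CUDA launch as written in the .cu file:
//   scaleKernel<<<gridSize, blockSize>>>(d_v, 2.0f, n);
// hipify output, as seen for castDoubleToFloat in the .hip file above
// (arguments: kernel, grid dim, block dim, shared memory bytes, stream, kernel args):
//   hipLaunchKernelGGL((scaleKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_v, 2.0f, n);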
618ccd8bf3d2c218bf92b4768e923793e96481fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ /** * @author Yan haixu * Contact: just github.com/hova88 * @date 2021/04/30 */ #include <stdio.h> #include "nms.h" #include "common.h" #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p){ //params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 
2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef DEBUG printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++){ #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box2d(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++){ printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - 
cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } __device__ inline float iou_bev(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b){ return; } const float * cur_box_a = boxes_a + a_idx * 7; const float * cur_box_b = boxes_b + b_idx * 7; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b){ return; } const float * cur_box_a = boxes_a + a_idx * 7; const float * cur_box_b = boxes_b + b_idx * 7; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask){ //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const * const a, 
float const * const b) { //params: a: [x, y, z, dx, dy, dz, heading] //params: b: [x, y, z, dx, dy, dz, heading] float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[4]; float Sb = b[3] * b[4]; return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask){ //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } NmsCuda::NmsCuda(const int num_threads, const int num_box_corners, const float nms_overlap_threshold) : num_threads_(num_threads), num_box_corners_(num_box_corners), nms_overlap_threshold_(nms_overlap_threshold) {} void NmsCuda::DoNmsCuda(const int host_filter_count, float *dev_sorted_box_for_nms, long *out_keep_inds, int *out_num_to_keep) { const int col_blocks = DIVUP(host_filter_count, num_threads_); unsigned long long *dev_mask = NULL; GPU_CHECK(hipMalloc(&dev_mask, host_filter_count * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(host_filter_count, num_threads_), DIVUP(host_filter_count, num_threads_)); dim3 threads(num_threads_); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, host_filter_count, nms_overlap_threshold_, dev_sorted_box_for_nms, dev_mask); // postprocess for nms output std::vector<unsigned long long> host_mask(host_filter_count * col_blocks); GPU_CHECK(hipMemcpy(&host_mask[0], dev_mask, sizeof(unsigned long long) * host_filter_count * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); for (int i = 0; i < 
host_filter_count; ++i) { int nblock = i / num_threads_; int inblock = i % num_threads_; if (!(remv[nblock] & (1ULL << inblock))) { out_keep_inds[(*out_num_to_keep)++] = i; unsigned long long *p = &host_mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; ++j) { remv[j] |= p[j]; } } } GPU_CHECK(hipFree(dev_mask)); }
618ccd8bf3d2c218bf92b4768e923793e96481fc.cu
/* 3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) Written by Shaoshuai Shi All Rights Reserved 2019-2020. */ /** * @author Yan haixu * Contact: just github.com/hova88 * @date 2021/04/30 */ #include <stdio.h> #include "nms.h" #include "common.h" #define THREADS_PER_BLOCK 16 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) // #define DEBUG const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; const float EPS = 1e-8; struct Point { float x, y; __device__ Point() {} __device__ Point(double _x, double _y){ x = _x, y = _y; } __device__ void set(float _x, float _y){ x = _x; y = _y; } __device__ Point operator +(const Point &b)const{ return Point(x + b.x, y + b.y); } __device__ Point operator -(const Point &b)const{ return Point(x - b.x, y - b.y); } }; __device__ inline float cross(const Point &a, const Point &b){ return a.x * b.y - a.y * b.x; } __device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); } __device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && min(q1.x,q2.x) <= max(p1.x,p2.x) && min(p1.y,p2.y) <= max(q1.y,q2.y) && min(q1.y,q2.y) <= max(p1.y,p2.y); return ret; } __device__ inline int check_in_box2d(const float *box, const Point &p){ //params: (7) [x, y, z, dx, dy, dz, heading] const float MARGIN = 1e-2; float center_x = box[0], center_y = box[1]; float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); } __device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ // fast exclusion if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; // check cross standing float s1 = cross(q0, p1, p0); float s2 = cross(p1, q1, p0); float s3 = cross(p0, q1, q0); float s4 = cross(q1, p1, q0); if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; // calculate intersection of two lines float s5 = cross(q1, p1, p0); if(fabs(s5 - s1) > EPS){ ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); } else{ float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; float D = a0 * b1 - a1 * b0; ans.x = (b0 * c1 - b1 * c0) / D; ans.y = (a1 * c0 - a0 * c1) / D; } return 1; } __device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){ float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; p.set(new_x, new_y); } __device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){ return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); } __device__ inline float box_overlap(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float a_angle = box_a[6], b_angle = box_b[6]; float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - 
a_dy_half; float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; Point center_a(box_a[0], box_a[1]); Point center_b(box_b[0], box_b[1]); #ifdef DEBUG printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle); printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); #endif Point box_a_corners[5]; box_a_corners[0].set(a_x1, a_y1); box_a_corners[1].set(a_x2, a_y1); box_a_corners[2].set(a_x2, a_y2); box_a_corners[3].set(a_x1, a_y2); Point box_b_corners[5]; box_b_corners[0].set(b_x1, b_y1); box_b_corners[1].set(b_x2, b_y1); box_b_corners[2].set(b_x2, b_y2); box_b_corners[3].set(b_x1, b_y2); // get oriented corners float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); for (int k = 0; k < 4; k++){ #ifdef DEBUG printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); #ifdef DEBUG printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); #endif } box_a_corners[4] = box_a_corners[0]; box_b_corners[4] = box_b_corners[0]; // get intersection of lines Point cross_points[16]; Point poly_center; int cnt = 0, flag = 0; poly_center.set(0, 0); for (int i = 0; i < 4; i++){ for (int j = 0; j < 4; j++){ flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); if (flag){ poly_center = poly_center + cross_points[cnt]; cnt++; #ifdef DEBUG printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n", cross_points[cnt - 1].x, cross_points[cnt - 1].y, box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); #endif } } } // check corners for (int k = 0; k < 4; k++){ if (check_in_box2d(box_a, box_b_corners[k])){ poly_center = poly_center + box_b_corners[k]; cross_points[cnt] = box_b_corners[k]; cnt++; #ifdef DEBUG printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } if (check_in_box2d(box_b, box_a_corners[k])){ poly_center = poly_center + box_a_corners[k]; cross_points[cnt] = box_a_corners[k]; cnt++; #ifdef DEBUG printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); #endif } } poly_center.x /= cnt; poly_center.y /= cnt; // sort the points of polygon Point temp; for (int j = 0; j < cnt - 1; j++){ for (int i = 0; i < cnt - j - 1; i++){ if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ temp = cross_points[i]; cross_points[i] = cross_points[i + 1]; cross_points[i + 1] = temp; } } } #ifdef DEBUG printf("cnt=%d\n", cnt); for (int i = 0; i < cnt; i++){ printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); } #endif // get the overlap areas float area = 0; for (int k = 0; k < cnt - 1; k++){ area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); } return fabs(area) / 2.0; } 
__device__ inline float iou_bev(const float *box_a, const float *box_b){ // params box_a: [x, y, z, dx, dy, dz, heading] // params box_b: [x, y, z, dx, dy, dz, heading] float sa = box_a[3] * box_a[4]; float sb = box_b[3] * box_b[4]; float s_overlap = box_overlap(box_a, box_b); return s_overlap / fmaxf(sa + sb - s_overlap, EPS); } __global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b){ return; } const float * cur_box_a = boxes_a + a_idx * 7; const float * cur_box_b = boxes_b + b_idx * 7; float s_overlap = box_overlap(cur_box_a, cur_box_b); ans_overlap[a_idx * num_b + b_idx] = s_overlap; } __global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; if (a_idx >= num_a || b_idx >= num_b){ return; } const float * cur_box_a = boxes_a + a_idx * 7; const float * cur_box_b = boxes_b + b_idx * 7; float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; } __global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask){ //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } __device__ inline float iou_normal(float const * const a, float const * const b) { //params: a: [x, y, z, dx, dy, dz, heading] //params: b: [x, 
y, z, dx, dy, dz, heading] float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); float interS = width * height; float Sa = a[3] * a[4]; float Sb = b[3] * b[4]; return interS / fmaxf(Sa + Sb - interS, EPS); } __global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long *mask){ //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] //params: mask (N, N/THREADS_PER_BLOCK_NMS) const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; const float *cur_box = boxes + cur_box_idx * 7; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ t |= 1ULL << i; } } const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); mask[cur_box_idx * col_blocks + col_start] = t; } } NmsCuda::NmsCuda(const int num_threads, const int num_box_corners, const float nms_overlap_threshold) : num_threads_(num_threads), num_box_corners_(num_box_corners), nms_overlap_threshold_(nms_overlap_threshold) {} void NmsCuda::DoNmsCuda(const int host_filter_count, float *dev_sorted_box_for_nms, long *out_keep_inds, int *out_num_to_keep) { const int col_blocks = DIVUP(host_filter_count, num_threads_); unsigned long long *dev_mask = NULL; GPU_CHECK(cudaMalloc(&dev_mask, host_filter_count * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(host_filter_count, num_threads_), DIVUP(host_filter_count, num_threads_)); dim3 threads(num_threads_); nms_kernel<<<blocks, threads>>>(host_filter_count, nms_overlap_threshold_, dev_sorted_box_for_nms, dev_mask); // postprocess for nms output std::vector<unsigned long long> host_mask(host_filter_count * col_blocks); GPU_CHECK(cudaMemcpy(&host_mask[0], dev_mask, sizeof(unsigned long long) * host_filter_count * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); for (int i = 0; i < host_filter_count; ++i) { int nblock = i / num_threads_; int inblock = i % num_threads_; if (!(remv[nblock] & (1ULL << inblock))) { 
out_keep_inds[(*out_num_to_keep)++] = i; unsigned long long *p = &host_mask[0] + i * col_blocks; for (int j = nblock; j < col_blocks; ++j) { remv[j] |= p[j]; } } } GPU_CHECK(cudaFree(dev_mask)); }
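In the NMS pair above, nms_kernel writes one 64-bit word per (box, column block), where bit i means the column block's i-th box overlaps the row box beyond nms_overlap_thresh; DoNmsCuda then reduces that bitmask on the host after copying it back. Below is a host-only sketch of that reduction; reduce_nms_mask and its parameter names are illustrative, not part of the files above, and THREADS_PER_BLOCK_NMS corresponds to the 64 bits of an unsigned long long as in the source.

#include <vector>

// Mirrors the host loop in DoNmsCuda: a box is kept unless an earlier kept box
// already set its bit; a kept box then suppresses every box its mask word covers.
static int reduce_nms_mask(const std::vector<unsigned long long> &mask,
                           int boxes_num, int threads_per_block, long *keep_inds) {
    const int col_blocks = boxes_num / threads_per_block + (boxes_num % threads_per_block > 0); // DIVUP
    std::vector<unsigned long long> remv(col_blocks, 0);
    int num_to_keep = 0;
    for (int i = 0; i < boxes_num; ++i) {
        const int nblock  = i / threads_per_block;
        const int inblock = i % threads_per_block;
        if (!(remv[nblock] & (1ULL << inblock))) {            // box i not suppressed yet
            keep_inds[num_to_keep++] = i;                     // keep it
            const unsigned long long *p = mask.data() + (size_t)i * col_blocks;
            for (int j = nblock; j < col_blocks; ++j)         // fold its overlaps into remv
                remv[j] |= p[j];
        }
    }
    return num_to_keep;
}

Called with the host copy of the device mask, this reproduces the out_keep_inds / out_num_to_keep results computed at the end of DoNmsCuda.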
121c0801f57cbe63f8ea1ed29a683ffc9fbdf0c1.hip
// !!! This is a file automatically generated by hipify!!! #include <limits> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/zmath.cuh> namespace at { namespace native { // We manually overload acos because std::acos does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t acos_wrapper(scalar_t v) { return ::acos(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> acos_wrapper(thrust::complex<T> v) { return thrust::acos(v); } void acos_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "acos_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return acos_wrapper(a); }); }); } // We manually overload asin because std::asin does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t asin_wrapper(scalar_t v) { return ::asin(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> asin_wrapper(thrust::complex<T> v) { return thrust::asin(v); } void asin_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "asin_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return asin_wrapper(a); }); }); } // We manually overload sin because std::sin does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t sin_wrapper(scalar_t v) { return ::sin(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> sin_wrapper(thrust::complex<T> v) { return thrust::sin(v); } void sin_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "sin_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return sin_wrapper(a); }); }); } // We manually overload sinh because std::sinh does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t sinh_wrapper(scalar_t v) { return ::sinh(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> sinh_wrapper(thrust::complex<T> v) { return thrust::sinh(v); } void sinh_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "sinh_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return sinh_wrapper(a); }); }); } REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda); REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda); REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda); REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda); }} // namespace at::native
121c0801f57cbe63f8ea1ed29a683ffc9fbdf0c1.cu
#include <limits> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/AccumulateType.h> #include <ATen/Context.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/zmath.cuh> namespace at { namespace native { // We manually overload acos because std::acos does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t acos_wrapper(scalar_t v) { return ::acos(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> acos_wrapper(thrust::complex<T> v) { return thrust::acos(v); } void acos_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "acos_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return acos_wrapper(a); }); }); } // We manually overload asin because std::asin does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t asin_wrapper(scalar_t v) { return ::asin(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> asin_wrapper(thrust::complex<T> v) { return thrust::asin(v); } void asin_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "asin_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return asin_wrapper(a); }); }); } // We manually overload sin because std::sin does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t sin_wrapper(scalar_t v) { return ::sin(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> sin_wrapper(thrust::complex<T> v) { return thrust::sin(v); } void sin_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "sin_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return sin_wrapper(a); }); }); } // We manually overload sinh because std::sinh does not work with thrust::complex types. template<typename scalar_t> __host__ __device__ static inline scalar_t sinh_wrapper(scalar_t v) { return ::sinh(v); } template<typename T> __host__ __device__ static inline thrust::complex<T> sinh_wrapper(thrust::complex<T> v) { return thrust::sinh(v); } void sinh_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "sinh_cuda", [&]() { using thrust_t = typename ztype_cuda<scalar_t>::thrust_t; gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return sinh_wrapper(a); }); }); } REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda); REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda); REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda); REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda); }} // namespace at::native
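The ATen pair above repeats one pattern per op: a scalar wrapper that forwards to ::f for real types and to thrust::f for thrust::complex (since the std:: math functions do not accept thrust::complex), then a kernel that dispatches over floating, complex, and Half types and applies the wrapper elementwise through gpu_kernel. The sketch below shows how one more unary op would follow the same pattern; cos is not part of this file, and the sketch assumes the same includes and helpers (gpu_kernel, ztype_cuda, GPU_LAMBDA, the dispatch macro) as the file above.

// Hypothetical extra op written in the same style as acos/asin/sin/sinh above.
template<typename scalar_t>
__host__ __device__ static inline scalar_t cos_wrapper(scalar_t v) {
  return ::cos(v);
}

template<typename T>
__host__ __device__ static inline thrust::complex<T> cos_wrapper(thrust::complex<T> v) {
  return thrust::cos(v);   // std::cos does not work with thrust::complex, hence the overload
}

void cos_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "cos_cuda", [&]() {
    using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
    gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t { return cos_wrapper(a); });
  });
}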
0b2284914a9f8c7463ad8bfa94cf212aa014208d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<fstream> #include<cmath> #include<string> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include<cuda_runtime.h> #include "param.cpp" #include "input.cpp" #include "vel_verlet.cpp" #include "output.cpp" using namespace std; void cudasafe(int error, string message, string file, int line) { if (error != hipSuccess) { cout<<stderr<< " CUDA Error: "<<message<<" : "<<error<<". In "<<file<<" line "<<line<<endl; exit(-1); } } int main(){ //reading input parameters string paramFileName ="blocks.par",input_path = "../Question/input/"; string part_input_file, part_out_name_base, vtk_out_name_base; double timeStep, timeEnd, epsilon, sigma; unsigned part_out_freq, vtk_out_freq, cl_wg_1dsize; // reading .par file { // in param.cpp file readParam( input_path + paramFileName, part_input_file, part_out_name_base, vtk_out_name_base, timeStep, timeEnd, epsilon, sigma, part_out_freq, vtk_out_freq, cl_wg_1dsize ); // outParam( // part_input_file, part_out_name_base, // vtk_out_name_base, timeStep, timeEnd, epsilon, sigma, // part_out_freq, vtk_out_freq, cl_wg_1dsize // ); } // declearing host vector memory int N, dim, frames = (timeEnd/timeStep); // frames -> # of timeframes // N -> # of particles // dim -> dimension of vector thrust::host_vector<double> sliced; readInput(input_path + part_input_file,sliced,N,dim); // in input.cpp host_vector<double> x(N*dim,0), v(N*dim,0), m(N,0); // extracting m,x,v data from sliced { extract( raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), raw_pointer_cast(&m[0]), raw_pointer_cast(sliced.data()), N, dim ); // outInput( // raw_pointer_cast(x.data()), // raw_pointer_cast(v.data()), // raw_pointer_cast(m.data()), // N,dim // ); // in input.cpp } // CUDA Programming device_vector<double> d_x(x),d_v(v),d_f(N*dim,0),d_f_old(N*dim,0),zeros(N*dim,0),d_m(m); hipDeviceProp_t deviceProp; cudasafe( hipGetDeviceProperties(&deviceProp,0), "Get device Properties", __FILE__, __LINE__ ); // cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<endl; int blockSize = deviceProp.maxThreadsPerBlock, gridSize = (N/deviceProp.maxThreadsPerBlock)+1; // cout<<"Block Size: "<<blockSize<<"\nGrid size: "<<gridSize<<endl // <<"Frames: "<<frames<<endl; // Initial force calculation hipLaunchKernelGGL(( calF), dim3(gridSize), dim3(blockSize), 0, 0, raw_pointer_cast(&d_x[0]), raw_pointer_cast(&d_f[0]), N, dim, epsilon, sigma ); cudasafe( hipDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); // for (int i=0; i<N; i++){ // for (int j=0; j<dim; j++) // cout<<d_f[i*dim+j]<<"\t"; // } // cout<<endl; writeOut( part_out_name_base, 0, raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp writeVTK( vtk_out_name_base, 0, raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp for(int i=1; i<=frames; i++){ hipLaunchKernelGGL(( calX), dim3(gridSize), dim3(blockSize), 0, 0, raw_pointer_cast(&d_x[0]), raw_pointer_cast(&d_v[0]), raw_pointer_cast(&d_f[0]), raw_pointer_cast(&d_m[0]), timeStep, N, dim ); cudasafe( hipDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); d_f_old = d_f; thrust::copy(zeros.begin(), zeros.end(), d_f.begin()); hipLaunchKernelGGL(( calF), dim3(gridSize), dim3(blockSize), 0, 0, raw_pointer_cast(&d_x[0]), raw_pointer_cast(&d_f[0]), N, dim, epsilon, sigma ); cudasafe( hipDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); hipLaunchKernelGGL(( 
calV), dim3(gridSize),dim3(blockSize), 0, 0, raw_pointer_cast(&d_v[0]), raw_pointer_cast(&d_f[0]), raw_pointer_cast(&d_f_old[0]), raw_pointer_cast(&d_m[0]), timeStep, N, dim ); cudasafe( hipDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); // if(i<2){ // for (int k=0; k<N*dim; k++){ // cout<<d_f[k]<<"\t"; // } // cout<<endl; // // for (int k=0; k<N*dim; k++){ // // cout<<d_x[k]<<"\t"; // // } // // cout<<endl; // for (int k=0; k<N*dim; k++){ // cout<<d_v[k]<<"\t"; // } // cout<<endl; // } if(i%part_out_freq == 0){ m = d_m; x = d_x; v = d_v; writeOut( part_out_name_base, (i/part_out_freq), raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp } if(i%vtk_out_freq == 0){ m = d_m; x = d_x; v = d_v; writeVTK( part_out_name_base, (i/vtk_out_freq), raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp } } cout<<"\n\nAll done!\n\n"; return 0; }
0b2284914a9f8c7463ad8bfa94cf212aa014208d.cu
#include<iostream> #include<fstream> #include<cmath> #include<string> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include<cuda_runtime.h> #include "param.cpp" #include "input.cpp" #include "vel_verlet.cpp" #include "output.cpp" using namespace std; void cudasafe(int error, string message, string file, int line) { if (error != cudaSuccess) { cout<<stderr<< " CUDA Error: "<<message<<" : "<<error<<". In "<<file<<" line "<<line<<endl; exit(-1); } } int main(){ //reading input parameters string paramFileName ="blocks.par",input_path = "../Question/input/"; string part_input_file, part_out_name_base, vtk_out_name_base; double timeStep, timeEnd, epsilon, sigma; unsigned part_out_freq, vtk_out_freq, cl_wg_1dsize; // reading .par file { // in param.cpp file readParam( input_path + paramFileName, part_input_file, part_out_name_base, vtk_out_name_base, timeStep, timeEnd, epsilon, sigma, part_out_freq, vtk_out_freq, cl_wg_1dsize ); // outParam( // part_input_file, part_out_name_base, // vtk_out_name_base, timeStep, timeEnd, epsilon, sigma, // part_out_freq, vtk_out_freq, cl_wg_1dsize // ); } // declearing host vector memory int N, dim, frames = (timeEnd/timeStep); // frames -> # of timeframes // N -> # of particles // dim -> dimension of vector thrust::host_vector<double> sliced; readInput(input_path + part_input_file,sliced,N,dim); // in input.cpp host_vector<double> x(N*dim,0), v(N*dim,0), m(N,0); // extracting m,x,v data from sliced { extract( raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), raw_pointer_cast(&m[0]), raw_pointer_cast(sliced.data()), N, dim ); // outInput( // raw_pointer_cast(x.data()), // raw_pointer_cast(v.data()), // raw_pointer_cast(m.data()), // N,dim // ); // in input.cpp } // CUDA Programming device_vector<double> d_x(x),d_v(v),d_f(N*dim,0),d_f_old(N*dim,0),zeros(N*dim,0),d_m(m); cudaDeviceProp deviceProp; cudasafe( cudaGetDeviceProperties(&deviceProp,0), "Get device Properties", __FILE__, __LINE__ ); // cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<endl; int blockSize = deviceProp.maxThreadsPerBlock, gridSize = (N/deviceProp.maxThreadsPerBlock)+1; // cout<<"Block Size: "<<blockSize<<"\nGrid size: "<<gridSize<<endl // <<"Frames: "<<frames<<endl; // Initial force calculation calF<<<gridSize, blockSize>>>( raw_pointer_cast(&d_x[0]), raw_pointer_cast(&d_f[0]), N, dim, epsilon, sigma ); cudasafe( cudaDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); // for (int i=0; i<N; i++){ // for (int j=0; j<dim; j++) // cout<<d_f[i*dim+j]<<"\t"; // } // cout<<endl; writeOut( part_out_name_base, 0, raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp writeVTK( vtk_out_name_base, 0, raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp for(int i=1; i<=frames; i++){ calX<<<gridSize, blockSize>>>( raw_pointer_cast(&d_x[0]), raw_pointer_cast(&d_v[0]), raw_pointer_cast(&d_f[0]), raw_pointer_cast(&d_m[0]), timeStep, N, dim ); cudasafe( cudaDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); d_f_old = d_f; thrust::copy(zeros.begin(), zeros.end(), d_f.begin()); calF<<<gridSize, blockSize>>>( raw_pointer_cast(&d_x[0]), raw_pointer_cast(&d_f[0]), N, dim, epsilon, sigma ); cudasafe( cudaDeviceSynchronize(), "sync threads", __FILE__, __LINE__ ); calV<<<gridSize,blockSize>>>( raw_pointer_cast(&d_v[0]), raw_pointer_cast(&d_f[0]), raw_pointer_cast(&d_f_old[0]), raw_pointer_cast(&d_m[0]), timeStep, N, dim ); cudasafe( cudaDeviceSynchronize(), "sync threads", 
__FILE__, __LINE__ ); // if(i<2){ // for (int k=0; k<N*dim; k++){ // cout<<d_f[k]<<"\t"; // } // cout<<endl; // // for (int k=0; k<N*dim; k++){ // // cout<<d_x[k]<<"\t"; // // } // // cout<<endl; // for (int k=0; k<N*dim; k++){ // cout<<d_v[k]<<"\t"; // } // cout<<endl; // } if(i%part_out_freq == 0){ m = d_m; x = d_x; v = d_v; writeOut( part_out_name_base, (i/part_out_freq), raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp } if(i%vtk_out_freq == 0){ m = d_m; x = d_x; v = d_v; writeVTK( part_out_name_base, (i/vtk_out_freq), raw_pointer_cast(&m[0]), raw_pointer_cast(&x[0]), raw_pointer_cast(&v[0]), N, dim ); // in output.cpp } } cout<<"\n\nAll done!\n\n"; return 0; }
35b9e7f0bc4f6219cad057e03279944367c9578c.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iostream> #include <chrono> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #define THREAD_UNIT 1024 __global__ void kernal_fp32_AccuracyCategoricalClassification( float const *y_buf, float const *t_buf, int *accuracy, int node_size, int frame_size, int frame_stride ) { __shared__ int buf[THREAD_UNIT]; int frame_base = threadIdx.x; int frame_step = blockDim.x; int acc_sum = 0; for ( int frame = frame_base; frame < frame_size; frame += frame_step ) { // find the max float max_val = y_buf[frame]; int max_idx = 0; for ( int node = 1; node < node_size; ++node) { float val = y_buf[node * frame_stride + frame]; if ( val > max_val ) { max_val = val; max_idx = node; } } if ( t_buf[max_idx * frame_stride + frame] > 0 ) { acc_sum++; } } int prev_accuracy; if ( threadIdx.x == 0 ) { prev_accuracy = accuracy[0]; } // reduce across threads buf[threadIdx.x] = acc_sum; __syncthreads(); int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } if ( threadIdx.x == 0 ) { accuracy[0] = prev_accuracy + buf[0]; } } BBCU_DLL_EXPORT int bbcu_fp32_AccuracyCategoricalClassification ( const float* dev_y_buf, const float* dev_t_buf, int* dev_accuracy, int node_size, int frame_size, int frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); // compute dim3 block(THREAD_UNIT); dim3 grid(1); block.x = ::min((int)block.x, (int)frame_size); hipLaunchKernelGGL(( kernal_fp32_AccuracyCategoricalClassification), dim3(grid), dim3(block), 0, streamId, dev_y_buf, dev_t_buf, dev_accuracy, node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
35b9e7f0bc4f6219cad057e03279944367c9578c.cu
#include <algorithm> #include <iostream> #include <chrono> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #define THREAD_UNIT 1024 __global__ void kernal_fp32_AccuracyCategoricalClassification( float const *y_buf, float const *t_buf, int *accuracy, int node_size, int frame_size, int frame_stride ) { __shared__ int buf[THREAD_UNIT]; int frame_base = threadIdx.x; int frame_step = blockDim.x; int acc_sum = 0; for ( int frame = frame_base; frame < frame_size; frame += frame_step ) { // find the max float max_val = y_buf[frame]; int max_idx = 0; for ( int node = 1; node < node_size; ++node) { float val = y_buf[node * frame_stride + frame]; if ( val > max_val ) { max_val = val; max_idx = node; } } if ( t_buf[max_idx * frame_stride + frame] > 0 ) { acc_sum++; } } int prev_accuracy; if ( threadIdx.x == 0 ) { prev_accuracy = accuracy[0]; } // reduce across threads buf[threadIdx.x] = acc_sum; __syncthreads(); int comb = 1; while (comb < blockDim.x) { int next = comb * 2; int mask = next - 1; if ((threadIdx.x & mask) == 0) { buf[threadIdx.x] += buf[threadIdx.x + comb]; } comb = next; __syncthreads(); } if ( threadIdx.x == 0 ) { accuracy[0] = prev_accuracy + buf[0]; } } BBCU_DLL_EXPORT int bbcu_fp32_AccuracyCategoricalClassification ( const float* dev_y_buf, const float* dev_t_buf, int* dev_accuracy, int node_size, int frame_size, int frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); // compute dim3 block(THREAD_UNIT); dim3 grid(1); block.x = std::min((int)block.x, (int)frame_size); kernal_fp32_AccuracyCategoricalClassification<<<grid, block, 0, streamId>>>( dev_y_buf, dev_t_buf, dev_accuracy, node_size, frame_size, frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
aee7befa7fd532da27f97587ef9fbd2e892df938.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" // ensure function name to be left alone { __global__ void normal_pdf_gpu(const double *x, double *y, unsigned int n) { // assumes a 2-d grid of 1-d blocks unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if(i<n) y[i] = exp(-0.5*x[i]*x[i])*rsqrt(2.0*M_PI); } __global__ void sum_gpu(double *y, double *sumptr, unsigned int n, unsigned int n_subsums, unsigned int percore) { // assumes a 2-d grid of 1-d blocks unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; unsigned int j = i * percore; // first element that this thread will take care of unsigned int k; if (i >= n_subsums) return; sumptr[i] = 0.0; for (k = 0; k < percore; k++) { if (j + k < n) sumptr[i] += y[j + k]; } } __global__ void get_sum_gpu(double *y, double *sumptr) // copy the result into a smaller array { sumptr[0] = y[0]; } } /* vim: set sw=4 sts=4 et : */
aee7befa7fd532da27f97587ef9fbd2e892df938.cu
extern "C" // ensure function name to be left alone { __global__ void normal_pdf_gpu(const double *x, double *y, unsigned int n) { // assumes a 2-d grid of 1-d blocks unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; if(i<n) y[i] = exp(-0.5*x[i]*x[i])*rsqrt(2.0*M_PI); } __global__ void sum_gpu(double *y, double *sumptr, unsigned int n, unsigned int n_subsums, unsigned int percore) { // assumes a 2-d grid of 1-d blocks unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x; unsigned int j = i * percore; // first element that this thread will take care of unsigned int k; if (i >= n_subsums) return; sumptr[i] = 0.0; for (k = 0; k < percore; k++) { if (j + k < n) sumptr[i] += y[j + k]; } } __global__ void get_sum_gpu(double *y, double *sumptr) // copy the result into a smaller array { sumptr[0] = y[0]; } } /* vim: set sw=4 sts=4 et : */
6f7065bbdd3eab5fef1e9b4112543ee579fcd988.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "parameters.h" #include <stdio.h> __global__ void cuda_kernel(float *B, float *A, IndexSave *dInd) { // complete cuda kernel function int TotalThread = blockDim.x * gridDim.x; int stripe = SIZE / TotalThread; int head = (blockIdx.x * blockDim.x + threadIdx.x) * stripe; int LoopLim = head + stripe; for (int i = head; i < LoopLim; i++) { dInd[i].blockInd_x = blockIdx.x; dInd[i].threadInd_x = threadIdx.x; dInd[i].head = head; dInd[i].stripe = stripe; B[i] = (B[i] - A[i]) * (B[i] - A[i]); } }; float GPU_kernel(float *B, float *A, IndexSave *indsave) { float *dA, *dB; IndexSave *dInd; // Creat Timing Event hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Allocate Memory Space on Device // Allocate Memory Space on Device (for observation) hipMalloc((void **)&dInd, sizeof(IndexSave) * SIZE); // Copy Data to be Calculated hipMalloc((void **)&dB, sizeof(float) * SIZE); hipMalloc((void **)&dA, sizeof(float) * SIZE); // Copy Data to be Calculated hipMemcpy(dB, B, sizeof(float) * SIZE, hipMemcpyHostToDevice); hipMemcpy(dA, A, sizeof(float) * SIZE, hipMemcpyHostToDevice); // Copy Data (indsave array) to device hipMemcpy(dInd, indsave, sizeof(IndexSave) * SIZE, hipMemcpyHostToDevice); // Start Timer hipEventRecord(start, 0); // Lunch Kernel dim3 dimGrid(4); dim3 dimBlock(4); hipLaunchKernelGGL(( cuda_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dB, dA, dInd); // Stop Timer hipEventRecord(stop, 0); hipEventSynchronize(stop); // Copy Output back hipMemcpy(B, dB, sizeof(float) * SIZE, hipMemcpyDeviceToHost); hipMemcpy(A, dA, sizeof(float) * SIZE, hipMemcpyDeviceToHost); hipMemcpy(indsave, dInd, sizeof(IndexSave) * SIZE, hipMemcpyDeviceToHost); // Release Memory Space on Device hipFree(dA); hipFree(dB); hipFree(dInd); // Calculate Elapsed Time float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); return elapsedTime; }
6f7065bbdd3eab5fef1e9b4112543ee579fcd988.cu
#include "parameters.h" #include <stdio.h> __global__ void cuda_kernel(float *B, float *A, IndexSave *dInd) { // complete cuda kernel function int TotalThread = blockDim.x * gridDim.x; int stripe = SIZE / TotalThread; int head = (blockIdx.x * blockDim.x + threadIdx.x) * stripe; int LoopLim = head + stripe; for (int i = head; i < LoopLim; i++) { dInd[i].blockInd_x = blockIdx.x; dInd[i].threadInd_x = threadIdx.x; dInd[i].head = head; dInd[i].stripe = stripe; B[i] = (B[i] - A[i]) * (B[i] - A[i]); } }; float GPU_kernel(float *B, float *A, IndexSave *indsave) { float *dA, *dB; IndexSave *dInd; // Creat Timing Event cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Allocate Memory Space on Device // Allocate Memory Space on Device (for observation) cudaMalloc((void **)&dInd, sizeof(IndexSave) * SIZE); // Copy Data to be Calculated cudaMalloc((void **)&dB, sizeof(float) * SIZE); cudaMalloc((void **)&dA, sizeof(float) * SIZE); // Copy Data to be Calculated cudaMemcpy(dB, B, sizeof(float) * SIZE, cudaMemcpyHostToDevice); cudaMemcpy(dA, A, sizeof(float) * SIZE, cudaMemcpyHostToDevice); // Copy Data (indsave array) to device cudaMemcpy(dInd, indsave, sizeof(IndexSave) * SIZE, cudaMemcpyHostToDevice); // Start Timer cudaEventRecord(start, 0); // Lunch Kernel dim3 dimGrid(4); dim3 dimBlock(4); cuda_kernel<<<dimGrid, dimBlock>>>(dB, dA, dInd); // Stop Timer cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Copy Output back cudaMemcpy(B, dB, sizeof(float) * SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(A, dA, sizeof(float) * SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(indsave, dInd, sizeof(IndexSave) * SIZE, cudaMemcpyDeviceToHost); // Release Memory Space on Device cudaFree(dA); cudaFree(dB); cudaFree(dInd); // Calculate Elapsed Time float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); return elapsedTime; }
bc0d0bb9844373f32be417210ffa75f8a23215d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_plus_2_back; int xdim0_update_halo_kernel2_yvel_plus_2_back_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_plus_2_back; int ydim0_update_halo_kernel2_yvel_plus_2_back_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_plus_2_back; int xdim1_update_halo_kernel2_yvel_plus_2_back_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_plus_2_back; int ydim1_update_halo_kernel2_yvel_plus_2_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_plus_2_back * (y) + \ xdim0_update_halo_kernel2_yvel_plus_2_back * \ ydim0_update_halo_kernel2_yvel_plus_2_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_plus_2_back * (y) + \ xdim1_update_halo_kernel2_yvel_plus_2_back * \ ydim1_update_halo_kernel2_yvel_plus_2_back * (z)) // user function __device__ inline void update_halo_kernel2_yvel_plus_2_back_gpu(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, 2)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, 2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_plus_2_back( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_2_back + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_2_back * ydim0_update_halo_kernel2_yvel_plus_2_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_2_back + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_2_back * ydim1_update_halo_kernel2_yvel_plus_2_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_plus_2_back_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 45)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(45, "update_halo_kernel2_yvel_plus_2_back"); OPS_kernels[45].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + 
sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_2_back_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_2_back_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_2_back_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_2_back_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_2_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_plus_2_back_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_2_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_plus_2_back_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_2_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_plus_2_back_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_2_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_plus_2_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[45].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_2_back), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[45].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[45].mpi_time += t2 - t1; OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 45; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 45; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute; if (OPS_diags > 1) { ops_timing_realloc(45, "update_halo_kernel2_yvel_plus_2_back"); } ops_enqueue_kernel(desc); } #endif
bc0d0bb9844373f32be417210ffa75f8a23215d8.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_plus_2_back; int xdim0_update_halo_kernel2_yvel_plus_2_back_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_plus_2_back; int ydim0_update_halo_kernel2_yvel_plus_2_back_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_plus_2_back; int xdim1_update_halo_kernel2_yvel_plus_2_back_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_plus_2_back; int ydim1_update_halo_kernel2_yvel_plus_2_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_plus_2_back * (y) + \ xdim0_update_halo_kernel2_yvel_plus_2_back * \ ydim0_update_halo_kernel2_yvel_plus_2_back * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_plus_2_back * (y) + \ xdim1_update_halo_kernel2_yvel_plus_2_back * \ ydim1_update_halo_kernel2_yvel_plus_2_back * (z)) // user function __device__ inline void update_halo_kernel2_yvel_plus_2_back_gpu(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, 2)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, 2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_plus_2_back( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_2_back + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_2_back * ydim0_update_halo_kernel2_yvel_plus_2_back; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_2_back + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_2_back * ydim1_update_halo_kernel2_yvel_plus_2_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_plus_2_back_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 45)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(45, "update_halo_kernel2_yvel_plus_2_back"); OPS_kernels[45].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); 
} #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_2_back_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_2_back_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_2_back_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_2_back_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_2_back, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_plus_2_back_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_2_back, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_plus_2_back_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_2_back, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_plus_2_back_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_2_back, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_plus_2_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? 
args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[45].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_yvel_plus_2_back<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[45].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[45].mpi_time += t2 - t1; OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[45].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel2_yvel_plus_2_back(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 45; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 45; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_back_execute; if (OPS_diags > 1) { ops_timing_realloc(45, "update_halo_kernel2_yvel_plus_2_back"); } ops_enqueue_kernel(desc); } #endif
42b6798dfe0b0e30b3513f43ab93f91911ad8b61.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <energymin/energymin_amg_level.h> #include <amg_level.h> #include <basic_types.h> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <blas.h> #include <util.h> #include <thrust/logical.h> #include <thrust/remove.h> #include <thrust/adjacent_difference.h> #include <assert.h> #include <matrix_io.h> #include <csr_multiply.h> #include <thrust/logical.h> #include <thrust/count.h> #include <thrust/sort.h> #include <profile.h> #include <string> #include <algorithm> namespace amgx { namespace energymin { // --------------------------- Begin Base Class Public methods ------------------------------------ template <class T_Config> Energymin_AMG_Level_Base<T_Config> ::Energymin_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg) { selector = amgx::classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); strength = NULL; std::string selector_val = amg->m_cfg->template getParameter<std::string>("selector", amg->m_cfg_scope); if (selector_val == "PMIS") //or any other classical selector { strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); //using default strength max_row_sum = amg->m_cfg->AMG_Config::getParameter<double>("max_row_sum", amg->m_cfg_scope); } } template <class T_Config> Energymin_AMG_Level_Base<T_Config>::~Energymin_AMG_Level_Base() { delete selector; delete interpolator; if (strength != NULL) { delete strength; } } // Compute A, P, and R operators template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::createCoarseVertices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); int size_all; size_all = A.get_num_rows(); this->m_cf_map.resize(size_all); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); markCoarseFinePoints(); } template 
<class T_Config> void Energymin_AMG_Level_Base<T_Config> ::createCoarseMatrices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); /* WARNING: exit if D1 interpolator is selected in distributed setting */ std::string s(""); s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config ::getParameter<std::string>("energymin_interpolator", AMG_Level<T_Config>::amg->m_cfg_scope); // Compute Restriction operator computeRestrictionOperator(); // Compute Prolongation operator and coarse matrix Ac if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1) { // Create Prolongation operator computeProlongationOperator(); computeAOperator(); } else { computeAOperator_distributed(); } RAP.copyAuxData(&A); if (this->getA().is_matrix_singleGPU()) { this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_num_rows() * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } else { // m_next_level_size is the size that will be used to allocate xc, bc vectors int size, offset; this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size); this->m_next_level_size = size * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::markCoarseFinePoints() { Matrix<T_Config> &A = this->getA(); // Allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; int size_all, size_full, nnz_full; BVector m_s_con; IVector m_scratch; FVector weights; if (!A.is_matrix_singleGPU()) { int offset; // Need to get number of 2-ring rows A.getOffsetAndSizeForView(ALL, &offset, &size_all); A.getOffsetAndSizeForView(FULL, &offset, &size_full); A.getNnzForView(FULL, &nnz_full); weights.resize(size_full); } else { size_all = A.get_num_rows(); size_full = A.get_num_rows(); nnz_full = A.get_num_nz(); weights.resize(A.get_num_rows()); } this->m_cf_map.resize(size_all); m_s_con.resize(nnz_full); m_scratch.resize(size_full); thrust::fill(weights.begin(), weights.end(), 0.0); cudaCheckError(); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); thrust::fill(m_s_con.begin(), m_s_con.end(), false); cudaCheckError(); thrust::fill(m_scratch.begin(), m_scratch.end(), 0); cudaCheckError(); if (strength != NULL) { if (!A.is_matrix_singleGPU()) { ViewType oldView = A.currentView(); A.setView(FULL); strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); A.setView(oldView); A.manager->exchange_halo(weights, weights.tag); } else { strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); } } // Mark coarse and fine points selector->markCoarseFinePoints(A, weights, m_s_con, this->m_cf_map, m_scratch); this->m_cf_map.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeRestrictionOperator() { this->Profile.tic("computeR"); Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template 
setVecPrec<AMGX_vecFloat>::Type> FVector; // WARNING: Since energymin P is in computed in CSC format and AMGX does not support // CSC format, we are actually computing P^T (=R) in generateInterpolationMatrix!! //generate the interpolation matrix interpolator->generateInterpolationMatrix(A, this->m_cf_map, R, AMG_Level<TConfig>::amg); this->m_cf_map.clear(); this->m_cf_map.shrink_to_fit(); this->Profile.toc("computeR"); } // Compute R=P^T template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeProlongationOperator() { this->Profile.tic("computeP"); P.set_initialized(0); R.setView(OWNED); transpose(R, P, R.get_num_rows()); if (this->m_min_rows_latency_hiding < 0 || P.get_num_rows() < this->m_min_rows_latency_hiding) { // This will cause bsrmv to not do latency hiding P.setInteriorView(OWNED); P.setExteriorView(OWNED); } P.set_initialized(1); this->Profile.toc("computeP"); } // Compute the Galerkin product: A_c=R*A*P template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { FatalError("Energymin AMG computeAOperator_1x1 not implemented on host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Distributed energymin AMG not implemented for host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory() )->getA(); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); this->R.set_initialized(0); this->R.addProps(CSR); this->R.set_initialized(1); this->P.set_initialized(0); this->P.addProps(CSR); this->P.set_initialized(1); void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } RAP.set_initialized(0); CSR_Multiply<TConfig_d>::csr_galerkin_product(this->R, this->getA(), this->P, RAP, NULL, NULL, NULL, NULL, NULL, NULL, wk); RAP.set_initialized(1); this->Profile.toc("computeA"); } // Compute the restriction: rr=R*r template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::restrictResidual(VVector &r, VVector &rr) { typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // we need to resize residual vector to make sure it can store halo rows to be sent if (!P.is_matrix_singleGPU()) { int desired_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); rr.resize(desired_size); } this->Profile.tic("restrictRes"); // Disable speculative send of rr if (P.is_matrix_singleGPU()) { multiply( R, r, rr); } else { multiply_with_mask_restriction( R, r, rr, P); } rr.dirtybit = 1; // Do I need this? 
if (!P.is_matrix_singleGPU()) { int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size(); // P.manager->transformVector(rr); //This is just to make sure size is right if (rr.size() < desired_size) { rr.resize(P.manager->halo_offsets[P.manager->neighbors.size()]*rr.get_block_size()); } // P.manager->exchange_halo(rr, rr.tag); } this->Profile.toc("restrictRes"); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Energymin AMG Level computeAOperator_1x1_distributed() not implemented", AMGX_ERR_NOT_IMPLEMENTED); } // Prolongate the error: x+=P*e template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp) { this->Profile.tic("proCorr"); // get coarse matrix typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // Use P.manager to exchange halo of e before doing P // (since P has columns belonging to one of P.neighbors) e.dirtybit = 1; if (!P.is_matrix_singleGPU()) { int e_size = ::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); e.resize(e_size); } if (P.is_matrix_singleGPU()) { if (e.size() > 0) {multiply( P, e, tmp);} } else { multiply_with_mask( P, e, tmp); } // get owned num rows for fine matrix int owned_size; if (Ac.is_matrix_distributed()) { int owned_offset; P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size); } else { owned_size = x.size(); } //apply axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size); this->Profile.toc("proCorr"); x.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator() { if (this->A->get_block_size() == 1) { computeAOperator_1x1(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator_distributed() { if (this->A->get_block_size() == 1) { computeAOperator_1x1_distributed(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace energymin } // namespace amgx
42b6798dfe0b0e30b3513f43ab93f91911ad8b61.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <energymin/energymin_amg_level.h> #include <amg_level.h> #include <basic_types.h> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <blas.h> #include <util.h> #include <thrust/logical.h> #include <thrust/remove.h> #include <thrust/adjacent_difference.h> #include <assert.h> #include <matrix_io.h> #include <csr_multiply.h> #include <thrust/logical.h> #include <thrust/count.h> #include <thrust/sort.h> #include <profile.h> #include <string> #include <algorithm> namespace amgx { namespace energymin { // --------------------------- Begin Base Class Public methods ------------------------------------ template <class T_Config> Energymin_AMG_Level_Base<T_Config> ::Energymin_AMG_Level_Base(AMG_Class *amg) : AMG_Level<T_Config>(amg) { selector = amgx::classical::SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); interpolator = InterpolatorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); strength = NULL; std::string selector_val = amg->m_cfg->template getParameter<std::string>("selector", amg->m_cfg_scope); if (selector_val == "PMIS") //or any other classical selector { strength = StrengthFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); //using default strength max_row_sum = amg->m_cfg->AMG_Config::getParameter<double>("max_row_sum", amg->m_cfg_scope); } } template <class T_Config> Energymin_AMG_Level_Base<T_Config>::~Energymin_AMG_Level_Base() { delete selector; delete interpolator; if (strength != NULL) { delete strength; } } // Compute A, P, and R operators template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::createCoarseVertices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); int size_all; size_all = A.get_num_rows(); this->m_cf_map.resize(size_all); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); markCoarseFinePoints(); } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> 
::createCoarseMatrices() { Matrix<T_Config> &RAP = this->getNextLevel( typename Matrix<T_Config>::memory_space() )->getA(); Matrix<T_Config> &A = this->getA(); /* WARNING: exit if D1 interpolator is selected in distributed setting */ std::string s(""); s += AMG_Level<T_Config>::amg->m_cfg->AMG_Config ::getParameter<std::string>("energymin_interpolator", AMG_Level<T_Config>::amg->m_cfg_scope); // Compute Restriction operator computeRestrictionOperator(); // Compute Prolongation operator and coarse matrix Ac if (!this->A->is_matrix_distributed() || this->A->manager->get_num_partitions() == 1) { // Create Prolongation operator computeProlongationOperator(); computeAOperator(); } else { computeAOperator_distributed(); } RAP.copyAuxData(&A); if (this->getA().is_matrix_singleGPU()) { this->m_next_level_size = this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_num_rows() * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } else { // m_next_level_size is the size that will be used to allocate xc, bc vectors int size, offset; this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().getOffsetAndSizeForView(FULL, &offset, &size); this->m_next_level_size = size * this->getNextLevel(typename Matrix<TConfig>::memory_space())->getA().get_block_dimy(); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::markCoarseFinePoints() { Matrix<T_Config> &A = this->getA(); // Allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; int size_all, size_full, nnz_full; BVector m_s_con; IVector m_scratch; FVector weights; if (!A.is_matrix_singleGPU()) { int offset; // Need to get number of 2-ring rows A.getOffsetAndSizeForView(ALL, &offset, &size_all); A.getOffsetAndSizeForView(FULL, &offset, &size_full); A.getNnzForView(FULL, &nnz_full); weights.resize(size_full); } else { size_all = A.get_num_rows(); size_full = A.get_num_rows(); nnz_full = A.get_num_nz(); weights.resize(A.get_num_rows()); } this->m_cf_map.resize(size_all); m_s_con.resize(nnz_full); m_scratch.resize(size_full); thrust::fill(weights.begin(), weights.end(), 0.0); cudaCheckError(); thrust::fill(this->m_cf_map.begin(), this->m_cf_map.end(), 0); cudaCheckError(); thrust::fill(m_s_con.begin(), m_s_con.end(), false); cudaCheckError(); thrust::fill(m_scratch.begin(), m_scratch.end(), 0); cudaCheckError(); if (strength != NULL) { if (!A.is_matrix_singleGPU()) { ViewType oldView = A.currentView(); A.setView(FULL); strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); A.setView(oldView); A.manager->exchange_halo(weights, weights.tag); } else { strength->computeStrongConnectionsAndWeights(A, m_s_con, weights, this->max_row_sum); } } // Mark coarse and fine points selector->markCoarseFinePoints(A, weights, m_s_con, this->m_cf_map, m_scratch); this->m_cf_map.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeRestrictionOperator() { this->Profile.tic("computeR"); Matrix<T_Config> &A = this->getA(); //allocate necessary memory typedef Vector<typename TConfig::template setVecPrec<AMGX_vecInt>::Type> IVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecBool>::Type> BVector; typedef Vector<typename TConfig::template setVecPrec<AMGX_vecFloat>::Type> FVector; // WARNING: Since 
energymin P is in computed in CSC format and AMGX does not support // CSC format, we are actually computing P^T (=R) in generateInterpolationMatrix!! //generate the interpolation matrix interpolator->generateInterpolationMatrix(A, this->m_cf_map, R, AMG_Level<TConfig>::amg); this->m_cf_map.clear(); this->m_cf_map.shrink_to_fit(); this->Profile.toc("computeR"); } // Compute R=P^T template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeProlongationOperator() { this->Profile.tic("computeP"); P.set_initialized(0); R.setView(OWNED); transpose(R, P, R.get_num_rows()); if (this->m_min_rows_latency_hiding < 0 || P.get_num_rows() < this->m_min_rows_latency_hiding) { // This will cause bsrmv to not do latency hiding P.setInteriorView(OWNED); P.setExteriorView(OWNED); } P.set_initialized(1); this->Profile.toc("computeP"); } // Compute the Galerkin product: A_c=R*A*P template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { FatalError("Energymin AMG computeAOperator_1x1 not implemented on host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Distributed energymin AMG not implemented for host\n", AMGX_ERR_NOT_IMPLEMENTED); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1() { this->Profile.tic("computeA"); Matrix<TConfig_d> &RAP = this->getNextLevel( device_memory() )->getA(); RAP.addProps(CSR); RAP.set_block_dimx(this->getA().get_block_dimx()); RAP.set_block_dimy(this->getA().get_block_dimy()); this->R.set_initialized(0); this->R.addProps(CSR); this->R.set_initialized(1); this->P.set_initialized(0); this->P.addProps(CSR); this->P.set_initialized(1); void *wk = AMG_Level<TConfig_d>::amg->getCsrWorkspace(); if ( wk == NULL ) { wk = CSR_Multiply<TConfig_d>::csr_workspace_create( *(AMG_Level<TConfig_d>::amg->m_cfg), AMG_Level<TConfig_d>::amg->m_cfg_scope ); AMG_Level<TConfig_d>::amg->setCsrWorkspace( wk ); } RAP.set_initialized(0); CSR_Multiply<TConfig_d>::csr_galerkin_product(this->R, this->getA(), this->P, RAP, NULL, NULL, NULL, NULL, NULL, NULL, wk); RAP.set_initialized(1); this->Profile.toc("computeA"); } // Compute the restriction: rr=R*r template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::restrictResidual(VVector &r, VVector &rr) { typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // we need to resize residual vector to make sure it can store halo rows to be sent if (!P.is_matrix_singleGPU()) { int desired_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()] * rr.get_block_size()); rr.resize(desired_size); } this->Profile.tic("restrictRes"); // Disable speculative send of rr if (P.is_matrix_singleGPU()) { multiply( R, r, rr); } else { multiply_with_mask_restriction( R, r, rr, P); } rr.dirtybit = 1; // Do I need this? 
if (!P.is_matrix_singleGPU()) { int desired_size = P.manager->halo_offsets[P.manager->neighbors.size()] * rr.get_block_size(); // P.manager->transformVector(rr); //This is just to make sure size is right if (rr.size() < desired_size) { rr.resize(P.manager->halo_offsets[P.manager->neighbors.size()]*rr.get_block_size()); } // P.manager->exchange_halo(rr, rr.tag); } this->Profile.toc("restrictRes"); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Energymin_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > ::computeAOperator_1x1_distributed() { FatalError("Energymin AMG Level computeAOperator_1x1_distributed() not implemented", AMGX_ERR_NOT_IMPLEMENTED); } // Prolongate the error: x+=P*e template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::prolongateAndApplyCorrection(VVector &e, VVector &bc, VVector &x, VVector &tmp) { this->Profile.tic("proCorr"); // get coarse matrix typedef typename TConfig::MemSpace MemorySpace; Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); // Use P.manager to exchange halo of e before doing P // (since P has columns belonging to one of P.neighbors) e.dirtybit = 1; if (!P.is_matrix_singleGPU()) { int e_size = std::max(P.manager->halo_offsets[P.manager->neighbors.size()], Ac.manager->halo_offsets[Ac.manager->neighbors.size()]) * e.get_block_size(); e.resize(e_size); } if (P.is_matrix_singleGPU()) { if (e.size() > 0) {multiply( P, e, tmp);} } else { multiply_with_mask( P, e, tmp); } // get owned num rows for fine matrix int owned_size; if (Ac.is_matrix_distributed()) { int owned_offset; P.manager->getOffsetAndSizeForView(OWNED, &owned_offset, &owned_size); } else { owned_size = x.size(); } //apply axpby(x, tmp, x, ValueType(1), ValueType(1), 0, owned_size); this->Profile.toc("proCorr"); x.dirtybit = 1; } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator() { if (this->A->get_block_size() == 1) { computeAOperator_1x1(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } template <class T_Config> void Energymin_AMG_Level_Base<T_Config> ::computeAOperator_distributed() { if (this->A->get_block_size() == 1) { computeAOperator_1x1_distributed(); } else { FatalError("Energymin AMG not implemented for block_size != 1", AMGX_ERR_NOT_IMPLEMENTED); } } /**************************************** * Explicit instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Energymin_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace energymin } // namespace amgx
2d980c4c8ebaae34f00f8f35f522e578a68c9e0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "A_add_B.h" int main(void) { // for(int i=0;i<N;i++) { A[i]=1; B[i]=1; } //CPU start_CPU = clock(); for(int i=0;i<N;i++) { C[i]=A[i]+B[i]; } finish_CPU = clock(); //GPU init(N); //GPU start_GPU = clock(); //*vector_add(A,B,N); // hipLaunchKernelGGL(( AaddB), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, dev_A, dev_B, dev_C, N); finish_GPU = clock(); printf("C[10]=%10f \n",C[0]); // hipMemcpy(C , dev_C, size, hipMemcpyDeviceToHost); // hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); // for(int i=0;i<N;i++) { //printf("A=%f %d\n",A[i],i ); //printf("B=%f %d\n",B[i],i ); //printf("%f+%f=%f %d\n",A[i],B[i],C[i],i ); } printf("\n"); // printf("threadsPerBlock =%d\n",threadsPerBlock ); printf("num_blocks=%d\n",num_blocks); printf("\n"); //GPU time_GPU = (double)(finish_GPU - start_GPU)*1000 ; printf( "time_GPU=%f us\n", time_GPU); printf("\n"); //CPU time_CPU = (double)(finish_CPU - start_CPU)*1000 ; printf( "time_CPU=%f us\n", time_CPU); return 0; }
2d980c4c8ebaae34f00f8f35f522e578a68c9e0f.cu
#include "A_add_B.h" int main(void) { //数组赋初值 for(int i=0;i<N;i++) { A[i]=1; B[i]=1; } //开始CPU计算 start_CPU = clock(); for(int i=0;i<N;i++) { C[i]=A[i]+B[i]; } finish_CPU = clock(); //初始化GPU计算的参数 init(N); //开始GPU计算 start_GPU = clock(); //*vector_add(A,B,N); //执行核函数 AaddB<<<num_blocks, threadsPerBlock>>>(dev_A, dev_B, dev_C, N); finish_GPU = clock(); printf("C[10]=%10f \n",C[0]); // 结果拷贝回来 cudaMemcpy(C , dev_C, size, cudaMemcpyDeviceToHost); //释放内存 cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); //输出结果查看 for(int i=0;i<N;i++) { //printf("A=%f %d\n",A[i],i ); //printf("B=%f %d\n",B[i],i ); //printf("%f+%f=%f %d\n",A[i],B[i],C[i],i ); } printf("\n"); //显示线程和线程块的数量 printf("threadsPerBlock =%d\n",threadsPerBlock ); printf("num_blocks=%d\n",num_blocks); printf("\n"); //显示GPU的用时 time_GPU = (double)(finish_GPU - start_GPU)*1000 ; printf( "time_GPU=%f us\n", time_GPU); printf("\n"); //显示CPU的用时 time_CPU = (double)(finish_CPU - start_CPU)*1000 ; printf( "time_CPU=%f us\n", time_CPU); return 0; }
8572da4d9a30b0a434c43badf58ee4accf5c28ae.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/eigen.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/conv_grad_grad_kernel.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" namespace phi { template <typename T, typename Context> void ConvCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { auto X = &input; auto W = &filter; auto dO = &out_grad; auto ddX = input_grad_grad.get_ptr(); auto ddW = filter_grad_grad.get_ptr(); auto ddO = out_grad_grad; auto dW = filter_grad; auto dX = input_grad; if (ddO) { ctx.template Alloc<T>(ddO); phi::funcs::SetConstant<Context, T> set_zero; set_zero(ctx, ddO, static_cast<T>(0)); } if (dW) { ctx.template Alloc<T>(dW); } if (dX) { ctx.template Alloc<T>(dX); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = paddings_t; const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- DenseTensor transformed_X_channel(X->type()); DenseTensor transformed_dO_channel(dO->type()); DenseTensor transformed_ddX_channel(X->type()); DenseTensor transformed_ddO_channel(dO->type()); DenseTensor 
transformed_dX_channel(X->type()); if (channel_last) { ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel); ctx.template Alloc<T>(&transformed_dX_channel); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_X(X->type()); DenseTensor transformed_ddX(X->type()); DenseTensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_X); if (ddX) { ctx.template Alloc<T>(&transformed_ddX); } if (dX) { ctx.template Alloc<T>(&transformed_dX); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { funcs::PadFunction<Context, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 5>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i 
< data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = paddle::platform::CudnnDataType<T>::type; auto handle = ctx.cudnn_handle(); paddle::operators::ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1; paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2; paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result; paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t> filter_result; #else paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1; paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2; paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result; paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t> filter_result; #endif auto layout = paddle::platform::GetCudnnTensorFormat( paddle::platform::DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_result1.algo = search1::Find<T>( args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_result2.algo = search2::Find<T>( args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx); workspace_size = ::max( workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, iwo_group); 
args3.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = ::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_result.algo = search3::Find<T>( args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_result = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = ::max( workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = ::max(workspace_size, search4::GetWorkspaceSize(args4)); data_result.algo = search4::Find<T>( args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_result = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = ::max( workspace_size, search4::GetWorkspaceSize(args4, data_result.algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW( transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_result1.algo, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_result1.algo, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_result2.algo, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_result2.algo, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_result.algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_result.algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_result.algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, 
args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_result.algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { paddle::operators::RemovePaddingSlice<Context, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { paddle::operators::RemovePaddingSlice<Context, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX); } } } template <typename T, typename Context> void DepthwiseConvCudnnGradGradKernel( const Context& ctx, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, bool fuse_relu, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } template <typename T, typename Context> void Conv3DCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, double, phi::dtype::float16, 
phi::dtype::bfloat16) {} #else PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, double, phi::dtype::float16) {} #endif #endif
8572da4d9a30b0a434c43badf58ee4accf5c28ae.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/eigen.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/conv_grad_grad_kernel.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" namespace phi { template <typename T, typename Context> void ConvCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { auto X = &input; auto W = &filter; auto dO = &out_grad; auto ddX = input_grad_grad.get_ptr(); auto ddW = filter_grad_grad.get_ptr(); auto ddO = out_grad_grad; auto dW = filter_grad; auto dX = input_grad; if (ddO) { ctx.template Alloc<T>(ddO); phi::funcs::SetConstant<Context, T> set_zero; set_zero(ctx, ddO, static_cast<T>(0)); } if (dW) { ctx.template Alloc<T>(dW); } if (dX) { ctx.template Alloc<T>(dX); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = paddings_t; const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- DenseTensor transformed_X_channel(X->type()); DenseTensor transformed_dO_channel(dO->type()); DenseTensor transformed_ddX_channel(X->type()); DenseTensor transformed_ddO_channel(dO->type()); DenseTensor transformed_dX_channel(X->type()); if (channel_last) { 
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel); ctx.template Alloc<T>(&transformed_dX_channel); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_X(X->type()); DenseTensor transformed_ddX(X->type()); DenseTensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_X); if (ddX) { ctx.template Alloc<T>(&transformed_ddX); } if (dX) { ctx.template Alloc<T>(&transformed_dX); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { funcs::PadFunction<Context, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 5>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * 
i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = paddle::platform::CudnnDataType<T>::type; auto handle = ctx.cudnn_handle(); paddle::operators::ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1; paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2; paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result; paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t> filter_result; #else paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1; paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2; paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result; paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t> filter_result; #endif auto layout = paddle::platform::GetCudnnTensorFormat( paddle::platform::DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_result1.algo = search1::Find<T>( args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_result2.algo = search2::Find<T>( args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx); workspace_size = std::max( workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, iwo_group); args3.cdesc.set(dtype, padding_common, strides, 
dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_result.algo = search3::Find<T>( args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_result = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = std::max( workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4)); data_result.algo = search4::Find<T>( args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_result = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = std::max( workspace_size, search4::GetWorkspaceSize(args4, data_result.algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW( transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_result1.algo, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_result1.algo, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_result2.algo, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_result2.algo, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_result.algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_result.algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_result.algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, 
args4.wdesc.desc(), ddw + i * group_offset_filter, args4.odesc.desc(), transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_result.algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { paddle::operators::RemovePaddingSlice<Context, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { paddle::operators::RemovePaddingSlice<Context, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX); } } } template <typename T, typename Context> void DepthwiseConvCudnnGradGradKernel( const Context& ctx, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, bool fuse_relu, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } template <typename T, typename Context> void Conv3DCudnnGradGradKernel( const Context& ctx, const DenseTensor& input, const DenseTensor& filter, const DenseTensor& out_grad, const paddle::optional<DenseTensor>& input_grad_grad, const paddle::optional<DenseTensor>& filter_grad_grad, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* input_grad, DenseTensor* filter_grad, DenseTensor* out_grad_grad) { ConvCudnnGradGradKernel<T>(ctx, input, filter, out_grad, input_grad_grad, filter_grad_grad, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, input_grad, filter_grad, out_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, double, phi::dtype::float16, 
phi::dtype::bfloat16) {} #else PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, double, phi::dtype::float16) {} #endif #endif
782f6a508ff55d8ebaa3f0c33fa37322ebc5eb1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @File main.cu * * The main file of the project * * Paraleln programovn na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xmarci10 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include "nbody.h" #include "h5Helper.h" /** * Main rotine * @param argc * @param argv * @return */ int main(int argc, char **argv) { // Time measurement struct timeval t1, t2; if (argc != 10) { printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intesity> <reduction threads> <reduction threads/block> <input> <output>\n"); exit(1); } // Number of particles const int N = std::stoi(argv[1]); // Length of time step const float dt = std::stof(argv[2]); // Number of steps const int steps = std::stoi(argv[3]); // Number of thread blocks const int thr_blc = std::stoi(argv[4]); // Write frequency int writeFreq = std::stoi(argv[5]); // number of reduction threads const int red_thr = std::stoi(argv[6]); // Number of reduction threads/blocks const int red_thr_blc = std::stoi(argv[7]); // Size of the simulation CUDA gird - number of blocks const size_t simulationGrid = (N + thr_blc - 1) / thr_blc; // Size of the reduction CUDA grid - number of blocks const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc; // Size of the shared memory used in calculation_velocity kernel const size_t shared_mem_size = thr_blc * 7 * sizeof(float); // Size of the shared memory used in centerOfMass kernel const size_t reduction_shared_mem_size = (red_thr_blc/32) * 4 * sizeof(float); // Log benchmark setup printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); printf("blocks/grid: %lu\n", simulationGrid); printf("reduction threads/block: %d\n", red_thr_blc); printf("reduction blocks/grid: %lu\n", reductionGrid); const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0; writeFreq = (writeFreq > 0) ? writeFreq : 0; // CPU side memory allocation t_particles particles_cpu; float4 comOnGPU; hipHostMalloc(&particles_cpu.pos_x, N*sizeof(float),hipHostMallocDefault); hipHostMalloc(&particles_cpu.pos_y, N*sizeof(float),hipHostMallocDefault); hipHostMalloc(&particles_cpu.pos_z, N*sizeof(float),hipHostMallocDefault); hipHostMalloc(&particles_cpu.vel_x, N*sizeof(float),hipHostMallocDefault); hipHostMalloc(&particles_cpu.vel_y, N*sizeof(float),hipHostMallocDefault); hipHostMalloc(&particles_cpu.vel_z, N*sizeof(float),hipHostMallocDefault); hipHostMalloc(&particles_cpu.weight, N*sizeof(float),hipHostMallocDefault); /* * Caution! 
Create only after CPU side allocation * parameters: * Stride of two Offset of the first * Data pointer consecutive elements element in floats, * in floats, not bytes not bytes */ MemDesc md( particles_cpu.pos_x, 1, 0, // Postition in X particles_cpu.pos_y, 1, 0, // Postition in Y particles_cpu.pos_z, 1, 0, // Postition in Z particles_cpu.vel_x, 1, 0, // Velocity in X particles_cpu.vel_y, 1, 0, // Velocity in Y particles_cpu.vel_z, 1, 0, // Velocity in Z particles_cpu.weight, 1, 0, // Weight N, // Number of particles recordsNum); // Number of records in output file // Initialisation of helper class and loading of input data H5Helper h5Helper(argv[8], argv[9], md); try { h5Helper.init(); h5Helper.readParticleData(); } catch (const std::exception& e) { std::cerr<<e.what()<<std::endl; return -1; } // GPU side memory allocation // Step 3.* float4 *centerOfMassGPU; int *lock; hipMalloc(&centerOfMassGPU, 4*sizeof(float)); hipMalloc(&lock, sizeof(int)); // Step 0-2 t_particles particles_gpuIn; t_particles particles_gpuOut; t_particles particles_tmp; hipMalloc(&particles_gpuIn.pos_x, N*sizeof(float)); hipMalloc(&particles_gpuIn.pos_y, N*sizeof(float)); hipMalloc(&particles_gpuIn.pos_z, N*sizeof(float)); hipMalloc(&particles_gpuIn.vel_x, N*sizeof(float)); hipMalloc(&particles_gpuIn.vel_y, N*sizeof(float)); hipMalloc(&particles_gpuIn.vel_z, N*sizeof(float)); hipMalloc(&particles_gpuIn.weight, N*sizeof(float)); hipMalloc(&particles_gpuOut.pos_x, N*sizeof(float)); hipMalloc(&particles_gpuOut.pos_y, N*sizeof(float)); hipMalloc(&particles_gpuOut.pos_z, N*sizeof(float)); hipMalloc(&particles_gpuOut.vel_x, N*sizeof(float)); hipMalloc(&particles_gpuOut.vel_y, N*sizeof(float)); hipMalloc(&particles_gpuOut.vel_z, N*sizeof(float)); hipMalloc(&particles_gpuOut.weight, N*sizeof(float)); // Transfer data to GPU // Step 3.* hipMemset(centerOfMassGPU, 0.0f, 4*sizeof(float)); hipMemset(lock, 0, sizeof(int)); // Step 0-2 hipMemcpy(particles_gpuIn.pos_x, particles_cpu.pos_x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuIn.pos_y, particles_cpu.pos_y, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuIn.pos_z, particles_cpu.pos_z, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuIn.vel_x, particles_cpu.vel_x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuIn.vel_y, particles_cpu.vel_y, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuIn.vel_z, particles_cpu.vel_z, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuIn.weight, particles_cpu.weight, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(particles_gpuOut.weight, particles_cpu.weight, N*sizeof(float), hipMemcpyHostToDevice); // Streams and events allocation Step 4 hipStream_t stream_integrate, stream_com, stream_memcpy; hipStreamCreate(&stream_integrate); hipStreamCreate(&stream_com); hipStreamCreate(&stream_memcpy); hipEvent_t particles_finished, com_finished; hipEventCreate(&particles_finished); hipEventCreate(&com_finished); gettimeofday(&t1, 0); for(struct {int s = 0; int record_num = 0;} loop; loop.s < steps; loop.s++) { // Kernel invocation in stream_integrate hipLaunchKernelGGL(( calculate_velocity), dim3(simulationGrid), dim3(thr_blc), shared_mem_size, stream_integrate, particles_gpuIn, particles_gpuOut, N, dt); // Inserting an event indicating the completion of the particle position calculation hipEventRecord(particles_finished, stream_integrate); if (writeFreq > 0 && (loop.s % writeFreq == 0)) { // Kernel invocation in stream_com hipMemsetAsync(centerOfMassGPU, 
0.0f, 4*sizeof(float), stream_com); hipLaunchKernelGGL(( centerOfMass), dim3(reductionGrid), dim3(red_thr_blc), reduction_shared_mem_size, stream_com, particles_gpuIn, &centerOfMassGPU->x, &centerOfMassGPU->y, &centerOfMassGPU->z, &centerOfMassGPU->w, lock, N); // Inserting an event indicating the completion of the center of mass calculation hipEventRecord(com_finished, stream_com); // Transfer practicles to CPU in stream_memcpy hipMemcpyAsync(particles_cpu.pos_x, particles_gpuIn.pos_x, N*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); hipMemcpyAsync(particles_cpu.pos_y, particles_gpuIn.pos_y, N*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); hipMemcpyAsync(particles_cpu.pos_z, particles_gpuIn.pos_z, N*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); hipMemcpyAsync(particles_cpu.vel_x, particles_gpuIn.vel_x, N*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); hipMemcpyAsync(particles_cpu.vel_y, particles_gpuIn.vel_y, N*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); hipMemcpyAsync(particles_cpu.vel_z, particles_gpuIn.vel_z, N*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); // CPU waits until particles data will be available hipStreamSynchronize(stream_memcpy); // Putting a wait for the com_finished event in the stream_memcpy hipStreamWaitEvent(stream_memcpy, com_finished, 0); // Transfer com to CPU in stream_memcpy hipMemcpyAsync(&comOnGPU.x, centerOfMassGPU, 4*sizeof(float), hipMemcpyDeviceToHost, stream_memcpy); // While com is copied from D2H, CPU is writing particles into output file h5Helper.writeParticleData(loop.record_num); // CPU waits until com data will be available // It also ensures that com ends before a new integrate step begins hipStreamSynchronize(stream_memcpy); // CPU writes the com data into output file comOnGPU.x = comOnGPU.x / comOnGPU.w; comOnGPU.y = comOnGPU.y / comOnGPU.w; comOnGPU.z = comOnGPU.z / comOnGPU.w; h5Helper.writeCom(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w, loop.record_num++); } // stream_com needs to wait for an input data hipStreamWaitEvent(stream_com, particles_finished, 0); // stream_memcpy also needs to wait until the particles computing is finished and after // that the data can be copied from D2H hipStreamWaitEvent(stream_memcpy, particles_finished, 0); // swap pointers particles_tmp = particles_gpuOut; particles_gpuOut = particles_gpuIn; particles_gpuIn = particles_tmp; } hipDeviceSynchronize(); hipMemset(centerOfMassGPU, 0.0f, 4*sizeof(float)); // Kernel invoaction hipLaunchKernelGGL(( centerOfMass), dim3(reductionGrid), dim3(red_thr_blc), reduction_shared_mem_size, 0, particles_gpuIn, &centerOfMassGPU->x, &centerOfMassGPU->y, &centerOfMassGPU->z, &centerOfMassGPU->w, lock, N); gettimeofday(&t2, 0); // Approximate simulation wall time double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); // Transfer results back to the CPU // Step 3.* hipMemcpy(&comOnGPU.x, centerOfMassGPU, 4*sizeof(float), hipMemcpyDeviceToHost); // Step 0-2 hipMemcpy(particles_cpu.pos_x, particles_gpuIn.pos_x, N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.pos_y, particles_gpuIn.pos_y, N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.pos_z, particles_gpuIn.pos_z, N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.vel_x, particles_gpuIn.vel_x, N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.vel_y, particles_gpuIn.vel_y, N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.vel_z, particles_gpuIn.vel_z, 
N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.weight, particles_gpuIn.weight, N*sizeof(float), hipMemcpyDeviceToHost); // CPU completes the calculation of CenterOfMass comOnGPU.x = comOnGPU.x / comOnGPU.w; comOnGPU.y = comOnGPU.y / comOnGPU.w; comOnGPU.z = comOnGPU.z / comOnGPU.w; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnCPU = centerOfMassCPU(md); std::cout << "Center of mass on CPU:" << std::endl << comOnCPU.x <<", " << comOnCPU.y <<", " << comOnCPU.z <<", " << comOnCPU.w << std::endl; std::cout << "Center of mass on GPU:" << std::endl << comOnGPU.x<<", " << comOnGPU.y<<", " << comOnGPU.z<<", " << comOnGPU.w << std::endl; // Writing final values to the file h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w); h5Helper.writeParticleDataFinal(); // Free CPU memory hipHostFree(particles_cpu.pos_x ); hipHostFree(particles_cpu.pos_y ); hipHostFree(particles_cpu.pos_z ); hipHostFree(particles_cpu.vel_x ); hipHostFree(particles_cpu.vel_y ); hipHostFree(particles_cpu.vel_z ); hipHostFree(particles_cpu.weight); // Free GPU memory hipFree(particles_gpuIn.pos_x); hipFree(particles_gpuIn.pos_y); hipFree(particles_gpuIn.pos_z); hipFree(particles_gpuIn.vel_x); hipFree(particles_gpuIn.vel_y); hipFree(particles_gpuIn.vel_z); hipFree(particles_gpuIn.weight); hipFree(particles_gpuOut.pos_x); hipFree(particles_gpuOut.pos_y); hipFree(particles_gpuOut.pos_z); hipFree(particles_gpuOut.vel_x); hipFree(particles_gpuOut.vel_y); hipFree(particles_gpuOut.vel_z); hipFree(particles_gpuOut.weight); hipFree(centerOfMassGPU); hipFree(lock); hipStreamDestroy(stream_integrate); hipStreamDestroy(stream_memcpy); hipStreamDestroy(stream_com); hipEventDestroy(com_finished); hipEventDestroy(particles_finished); return 0; }// end of main //----------------------------------------------------------------------------------------------------------------------
782f6a508ff55d8ebaa3f0c33fa37322ebc5eb1e.cu
/** * @File main.cu * * The main file of the project * * Parallel programming on GPUs (PCG 2020) * Project no. 1 (cuda) * Login: xmarci10 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include "nbody.h" #include "h5Helper.h" /** * Main routine * @param argc * @param argv * @return */ int main(int argc, char **argv) { // Time measurement struct timeval t1, t2; if (argc != 10) { printf("Usage: nbody <N> <dt> <steps> <threads/block> <write intesity> <reduction threads> <reduction threads/block> <input> <output>\n"); exit(1); } // Number of particles const int N = std::stoi(argv[1]); // Length of time step const float dt = std::stof(argv[2]); // Number of steps const int steps = std::stoi(argv[3]); // Number of thread blocks const int thr_blc = std::stoi(argv[4]); // Write frequency int writeFreq = std::stoi(argv[5]); // number of reduction threads const int red_thr = std::stoi(argv[6]); // Number of reduction threads/blocks const int red_thr_blc = std::stoi(argv[7]); // Size of the simulation CUDA grid - number of blocks const size_t simulationGrid = (N + thr_blc - 1) / thr_blc; // Size of the reduction CUDA grid - number of blocks const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc; // Size of the shared memory used in calculation_velocity kernel const size_t shared_mem_size = thr_blc * 7 * sizeof(float); // Size of the shared memory used in centerOfMass kernel const size_t reduction_shared_mem_size = (red_thr_blc/32) * 4 * sizeof(float); // Log benchmark setup printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); printf("blocks/grid: %lu\n", simulationGrid); printf("reduction threads/block: %d\n", red_thr_blc); printf("reduction blocks/grid: %lu\n", reductionGrid); const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0; writeFreq = (writeFreq > 0) ? writeFreq : 0; // CPU side memory allocation t_particles particles_cpu; float4 comOnGPU; cudaHostAlloc(&particles_cpu.pos_x, N*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&particles_cpu.pos_y, N*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&particles_cpu.pos_z, N*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&particles_cpu.vel_x, N*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&particles_cpu.vel_y, N*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&particles_cpu.vel_z, N*sizeof(float),cudaHostAllocDefault); cudaHostAlloc(&particles_cpu.weight, N*sizeof(float),cudaHostAllocDefault); /* * Caution!
Create only after CPU side allocation * parameters: * Stride of two Offset of the first * Data pointer consecutive elements element in floats, * in floats, not bytes not bytes */ MemDesc md( particles_cpu.pos_x, 1, 0, // Postition in X particles_cpu.pos_y, 1, 0, // Postition in Y particles_cpu.pos_z, 1, 0, // Postition in Z particles_cpu.vel_x, 1, 0, // Velocity in X particles_cpu.vel_y, 1, 0, // Velocity in Y particles_cpu.vel_z, 1, 0, // Velocity in Z particles_cpu.weight, 1, 0, // Weight N, // Number of particles recordsNum); // Number of records in output file // Initialisation of helper class and loading of input data H5Helper h5Helper(argv[8], argv[9], md); try { h5Helper.init(); h5Helper.readParticleData(); } catch (const std::exception& e) { std::cerr<<e.what()<<std::endl; return -1; } // GPU side memory allocation // Step 3.* float4 *centerOfMassGPU; int *lock; cudaMalloc(&centerOfMassGPU, 4*sizeof(float)); cudaMalloc(&lock, sizeof(int)); // Step 0-2 t_particles particles_gpuIn; t_particles particles_gpuOut; t_particles particles_tmp; cudaMalloc(&particles_gpuIn.pos_x, N*sizeof(float)); cudaMalloc(&particles_gpuIn.pos_y, N*sizeof(float)); cudaMalloc(&particles_gpuIn.pos_z, N*sizeof(float)); cudaMalloc(&particles_gpuIn.vel_x, N*sizeof(float)); cudaMalloc(&particles_gpuIn.vel_y, N*sizeof(float)); cudaMalloc(&particles_gpuIn.vel_z, N*sizeof(float)); cudaMalloc(&particles_gpuIn.weight, N*sizeof(float)); cudaMalloc(&particles_gpuOut.pos_x, N*sizeof(float)); cudaMalloc(&particles_gpuOut.pos_y, N*sizeof(float)); cudaMalloc(&particles_gpuOut.pos_z, N*sizeof(float)); cudaMalloc(&particles_gpuOut.vel_x, N*sizeof(float)); cudaMalloc(&particles_gpuOut.vel_y, N*sizeof(float)); cudaMalloc(&particles_gpuOut.vel_z, N*sizeof(float)); cudaMalloc(&particles_gpuOut.weight, N*sizeof(float)); // Transfer data to GPU // Step 3.* cudaMemset(centerOfMassGPU, 0.0f, 4*sizeof(float)); cudaMemset(lock, 0, sizeof(int)); // Step 0-2 cudaMemcpy(particles_gpuIn.pos_x, particles_cpu.pos_x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuIn.pos_y, particles_cpu.pos_y, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuIn.pos_z, particles_cpu.pos_z, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuIn.vel_x, particles_cpu.vel_x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuIn.vel_y, particles_cpu.vel_y, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuIn.vel_z, particles_cpu.vel_z, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuIn.weight, particles_cpu.weight, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpuOut.weight, particles_cpu.weight, N*sizeof(float), cudaMemcpyHostToDevice); // Streams and events allocation Step 4 cudaStream_t stream_integrate, stream_com, stream_memcpy; cudaStreamCreate(&stream_integrate); cudaStreamCreate(&stream_com); cudaStreamCreate(&stream_memcpy); cudaEvent_t particles_finished, com_finished; cudaEventCreate(&particles_finished); cudaEventCreate(&com_finished); gettimeofday(&t1, 0); for(struct {int s = 0; int record_num = 0;} loop; loop.s < steps; loop.s++) { // Kernel invocation in stream_integrate calculate_velocity<<<simulationGrid, thr_blc, shared_mem_size, stream_integrate>>>(particles_gpuIn, particles_gpuOut, N, dt); // Inserting an event indicating the completion of the particle position calculation cudaEventRecord(particles_finished, stream_integrate); if (writeFreq > 0 && (loop.s % writeFreq == 0)) { // Kernel invocation in stream_com 
cudaMemsetAsync(centerOfMassGPU, 0.0f, 4*sizeof(float), stream_com); centerOfMass<<<reductionGrid, red_thr_blc, reduction_shared_mem_size, stream_com>>>(particles_gpuIn, &centerOfMassGPU->x, &centerOfMassGPU->y, &centerOfMassGPU->z, &centerOfMassGPU->w, lock, N); // Inserting an event indicating the completion of the center of mass calculation cudaEventRecord(com_finished, stream_com); // Transfer practicles to CPU in stream_memcpy cudaMemcpyAsync(particles_cpu.pos_x, particles_gpuIn.pos_x, N*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); cudaMemcpyAsync(particles_cpu.pos_y, particles_gpuIn.pos_y, N*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); cudaMemcpyAsync(particles_cpu.pos_z, particles_gpuIn.pos_z, N*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); cudaMemcpyAsync(particles_cpu.vel_x, particles_gpuIn.vel_x, N*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); cudaMemcpyAsync(particles_cpu.vel_y, particles_gpuIn.vel_y, N*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); cudaMemcpyAsync(particles_cpu.vel_z, particles_gpuIn.vel_z, N*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); // CPU waits until particles data will be available cudaStreamSynchronize(stream_memcpy); // Putting a wait for the com_finished event in the stream_memcpy cudaStreamWaitEvent(stream_memcpy, com_finished, 0); // Transfer com to CPU in stream_memcpy cudaMemcpyAsync(&comOnGPU.x, centerOfMassGPU, 4*sizeof(float), cudaMemcpyDeviceToHost, stream_memcpy); // While com is copied from D2H, CPU is writing particles into output file h5Helper.writeParticleData(loop.record_num); // CPU waits until com data will be available // It also ensures that com ends before a new integrate step begins cudaStreamSynchronize(stream_memcpy); // CPU writes the com data into output file comOnGPU.x = comOnGPU.x / comOnGPU.w; comOnGPU.y = comOnGPU.y / comOnGPU.w; comOnGPU.z = comOnGPU.z / comOnGPU.w; h5Helper.writeCom(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w, loop.record_num++); } // stream_com needs to wait for an input data cudaStreamWaitEvent(stream_com, particles_finished, 0); // stream_memcpy also needs to wait until the particles computing is finished and after // that the data can be copied from D2H cudaStreamWaitEvent(stream_memcpy, particles_finished, 0); // swap pointers particles_tmp = particles_gpuOut; particles_gpuOut = particles_gpuIn; particles_gpuIn = particles_tmp; } cudaDeviceSynchronize(); cudaMemset(centerOfMassGPU, 0.0f, 4*sizeof(float)); // Kernel invoaction centerOfMass<<<reductionGrid, red_thr_blc, reduction_shared_mem_size>>>(particles_gpuIn, &centerOfMassGPU->x, &centerOfMassGPU->y, &centerOfMassGPU->z, &centerOfMassGPU->w, lock, N); gettimeofday(&t2, 0); // Approximate simulation wall time double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); // Transfer results back to the CPU // Step 3.* cudaMemcpy(&comOnGPU.x, centerOfMassGPU, 4*sizeof(float), cudaMemcpyDeviceToHost); // Step 0-2 cudaMemcpy(particles_cpu.pos_x, particles_gpuIn.pos_x, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.pos_y, particles_gpuIn.pos_y, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.pos_z, particles_gpuIn.pos_z, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.vel_x, particles_gpuIn.vel_x, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.vel_y, particles_gpuIn.vel_y, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.vel_z, 
particles_gpuIn.vel_z, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.weight, particles_gpuIn.weight, N*sizeof(float), cudaMemcpyDeviceToHost); // CPU completes the calculation of CenterOfMass comOnGPU.x = comOnGPU.x / comOnGPU.w; comOnGPU.y = comOnGPU.y / comOnGPU.w; comOnGPU.z = comOnGPU.z / comOnGPU.w; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnCPU = centerOfMassCPU(md); std::cout << "Center of mass on CPU:" << std::endl << comOnCPU.x <<", " << comOnCPU.y <<", " << comOnCPU.z <<", " << comOnCPU.w << std::endl; std::cout << "Center of mass on GPU:" << std::endl << comOnGPU.x<<", " << comOnGPU.y<<", " << comOnGPU.z<<", " << comOnGPU.w << std::endl; // Writing final values to the file h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w); h5Helper.writeParticleDataFinal(); // Free CPU memory cudaFreeHost(particles_cpu.pos_x ); cudaFreeHost(particles_cpu.pos_y ); cudaFreeHost(particles_cpu.pos_z ); cudaFreeHost(particles_cpu.vel_x ); cudaFreeHost(particles_cpu.vel_y ); cudaFreeHost(particles_cpu.vel_z ); cudaFreeHost(particles_cpu.weight); // Free GPU memory cudaFree(particles_gpuIn.pos_x); cudaFree(particles_gpuIn.pos_y); cudaFree(particles_gpuIn.pos_z); cudaFree(particles_gpuIn.vel_x); cudaFree(particles_gpuIn.vel_y); cudaFree(particles_gpuIn.vel_z); cudaFree(particles_gpuIn.weight); cudaFree(particles_gpuOut.pos_x); cudaFree(particles_gpuOut.pos_y); cudaFree(particles_gpuOut.pos_z); cudaFree(particles_gpuOut.vel_x); cudaFree(particles_gpuOut.vel_y); cudaFree(particles_gpuOut.vel_z); cudaFree(particles_gpuOut.weight); cudaFree(centerOfMassGPU); cudaFree(lock); cudaStreamDestroy(stream_integrate); cudaStreamDestroy(stream_memcpy); cudaStreamDestroy(stream_com); cudaEventDestroy(com_finished); cudaEventDestroy(particles_finished); return 0; }// end of main //----------------------------------------------------------------------------------------------------------------------
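// --- illustrative sketch only (not part of the original sources) -------------
// The main loop above overlaps the integration kernel, the centerOfMass
// reduction and the device-to-host copies by placing them in three streams and
// ordering them with cudaEventRecord / cudaStreamWaitEvent. Below is a minimal
// self-contained version of that pattern; `step` is a stand-in kernel and the
// buffer sizes are arbitrary, not the project's data.
#include <cuda_runtime.h>

__global__ void step(float* data, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] += 1.0f;
}

static void overlapSketch(int n)
{
  float *d_data, *h_data;
  cudaMalloc(&d_data, n * sizeof(float));
  cudaMallocHost(&h_data, n * sizeof(float));   // pinned, required for async copies

  cudaStream_t compute, copy;
  cudaStreamCreate(&compute);
  cudaStreamCreate(&copy);
  cudaEvent_t stepDone;
  cudaEventCreate(&stepDone);

  for (int s = 0; s < 10; ++s) {
    step<<<(n + 255) / 256, 256, 0, compute>>>(d_data, n);
    cudaEventRecord(stepDone, compute);          // mark the end of this step
    cudaStreamWaitEvent(copy, stepDone, 0);      // the copy must not start earlier
    cudaMemcpyAsync(h_data, d_data, n * sizeof(float),
                    cudaMemcpyDeviceToHost, copy);
    // Host work that does not need h_data (e.g. writing the previous record)
    // can run here, in parallel with the GPU.
    cudaStreamSynchronize(copy);                 // h_data is now valid on the CPU
  }

  cudaEventDestroy(stepDone);
  cudaStreamDestroy(compute);
  cudaStreamDestroy(copy);
  cudaFreeHost(h_data);
  cudaFree(d_data);
}
//----------------------------------------------------------------------------------------------------------------------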
88b6fb1cd11fa88126553dc15ce204ca94f47719.hip
// !!! This is a file automatically generated by hipify!!! /* * CUDA code for GPU manual pages on FG */ #include <rocblas.h> #include <hip/hip_runtime.h> #include "matrixMul_kernel.hip" //This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( hipError_t err, const char *file, const int line ) { if( hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } //This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError( const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } //////////////////////////////////////////////////////////////////////////////// // declaration, forward void doMatrixMul(int argc, char** argv); void randomInit(float*, int); void inline checkError(hipblasStatus_t status, const char* msg) { if(status != HIPBLAS_STATUS_SUCCESS){ printf(msg); exit(-1); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { doMatrixMul(argc, argv); } //////////////////////////////////////////////////////////////////////////////// // host function //////////////////////////////////////////////////////////////////////////////// void doMatrixMul(int argc, char** argv) { int size = 32; int devID; hipDeviceProp_t props; checkCudaErrors(hipGetDevice(&devID)); checkCudaErrors(hipGetDeviceProperties(&props, devID)); int block_size = (props.major < 2) ? 
16 : 32; unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC; uiWA = size; uiHA = size; uiWB = size; uiHB = size; uiWC = size; uiHC = size; // allocate host memory for matrices A and B unsigned int size_A = uiWA * uiHA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = uiWB * uiHB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory srand(2012); randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A, *d_B, *d_C; unsigned int size_C = uiWC * uiHC; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); float* h_CUBLAS = (float*) malloc(mem_size_C); checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A)); checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B)); // copy host memory to device checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) ); checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) ); checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(block_size, block_size); dim3 grid(uiWC / threads.x, uiHC / threads.y); if (block_size == 16) { hipLaunchKernelGGL(( matrixMul<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, uiWA, uiWB); } else { hipLaunchKernelGGL(( matrixMul<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, uiWA, uiWB); } hipDeviceSynchronize(); free(h_A); free(h_B); free(h_C); checkCudaErrors(hipFree(d_A)); checkCudaErrors(hipFree(d_B)); checkCudaErrors(hipFree(d_C)); hipDeviceReset(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; }
88b6fb1cd11fa88126553dc15ce204ca94f47719.cu
/* * CUDA code for GPU manual pages on FG */ #include <cublas_v2.h> #include <cuda_runtime.h> #include "matrixMul_kernel.cu" //This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( cudaError err, const char *file, const int line ) { if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } //This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError( const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } //////////////////////////////////////////////////////////////////////////////// // declaration, forward void doMatrixMul(int argc, char** argv); void randomInit(float*, int); void inline checkError(cublasStatus_t status, const char* msg) { if(status != CUBLAS_STATUS_SUCCESS){ printf(msg); exit(-1); } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { doMatrixMul(argc, argv); } //////////////////////////////////////////////////////////////////////////////// // host function //////////////////////////////////////////////////////////////////////////////// void doMatrixMul(int argc, char** argv) { int size = 32; int devID; cudaDeviceProp props; checkCudaErrors(cudaGetDevice(&devID)); checkCudaErrors(cudaGetDeviceProperties(&props, devID)); int block_size = (props.major < 2) ? 
16 : 32; unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC; uiWA = size; uiHA = size; uiWB = size; uiHB = size; uiWC = size; uiHC = size; // allocate host memory for matrices A and B unsigned int size_A = uiWA * uiHA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = uiWB * uiHB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory srand(2012); randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A, *d_B, *d_C; unsigned int size_C = uiWC * uiHC; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); float* h_CUBLAS = (float*) malloc(mem_size_C); checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A)); checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B)); // copy host memory to device checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ); checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ); checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(block_size, block_size); dim3 grid(uiWC / threads.x, uiHC / threads.y); if (block_size == 16) { matrixMul<16><<< grid, threads >>>(d_C, d_A, d_B, uiWA, uiWB); } else { matrixMul<32><<< grid, threads >>>(d_C, d_A, d_B, uiWA, uiWB); } cudaDeviceSynchronize(); free(h_A); free(h_B); free(h_C); checkCudaErrors(cudaFree(d_A)); checkCudaErrors(cudaFree(d_B)); checkCudaErrors(cudaFree(d_C)); cudaDeviceReset(); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; }
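// --- illustrative sketch only (not part of the original sources) -------------
// The kernel itself lives in matrixMul_kernel.cu, which is not part of this
// dump. Judging from the launch `matrixMul<16/32><<<grid, threads>>>(d_C, d_A,
// d_B, uiWA, uiWB)`, it appears to be the classic tiled shared-memory multiply
// from the CUDA samples. The sketch below is an assumed minimal equivalent, not
// the file's actual code: C = A * B, one BLOCK_SIZE x BLOCK_SIZE tile of C per
// thread block, matrix extents assumed to be multiples of BLOCK_SIZE.
template <int BLOCK_SIZE>
__global__ void matrixMulSketch(float* C, const float* A, const float* B,
                                int wA, int wB)
{
  __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

  int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;   // row of C
  int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;   // column of C
  float acc = 0.0f;

  // Walk over the tiles of A (along wA) and the matching tiles of B.
  for (int t = 0; t < wA / BLOCK_SIZE; ++t) {
    As[threadIdx.y][threadIdx.x] = A[row * wA + t * BLOCK_SIZE + threadIdx.x];
    Bs[threadIdx.y][threadIdx.x] = B[(t * BLOCK_SIZE + threadIdx.y) * wB + col];
    __syncthreads();
    for (int k = 0; k < BLOCK_SIZE; ++k)
      acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
    __syncthreads();
  }
  C[row * wB + col] = acc;
}
//----------------------------------------------------------------------------------------------------------------------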
a2df3b5afada278f088f2c290899a2cb69f501b7.hip
// !!! This is a file automatically generated by hipify!!!
#include "CudaBackend.h"

#if CUDA_ENABLED

#include <hip/hip_runtime.h>

CudaBackend::~CudaBackend() {
    // `data` is pinned host memory from hipHostMalloc, so it must be released
    // with hipHostFree rather than hipFree.
    hipHostFree(data);
}

Image CudaBackend::render() {
    doRender();
    hipDeviceSynchronize();
    return Image(width, height, data);
}

void CudaBackend::setResolution(unsigned width, unsigned height) {
    Backend::setResolution(width, height);
    hipHostFree(data);
    hipHostMalloc(&data, sizeof(Color) * width * height);
}

#endif //CUDA_ENABLED
a2df3b5afada278f088f2c290899a2cb69f501b7.cu
#include "CudaBackend.h"

#if CUDA_ENABLED

#include <cuda_runtime.h>

CudaBackend::~CudaBackend() {
    // `data` is pinned host memory from cudaMallocHost, so it must be released
    // with cudaFreeHost rather than cudaFree.
    cudaFreeHost(data);
}

Image CudaBackend::render() {
    doRender();
    cudaDeviceSynchronize();
    return Image(width, height, data);
}

void CudaBackend::setResolution(unsigned width, unsigned height) {
    Backend::setResolution(width, height);
    cudaFreeHost(data);
    cudaMallocHost(&data, sizeof(Color) * width * height);
}

#endif //CUDA_ENABLED
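// --- illustrative sketch only (not part of the original sources) -------------
// The backend above keeps its framebuffer in pinned (page-locked) host memory,
// presumably so the rendered buffer can be transferred quickly and wrapped by
// Image without another copy. hipify maps cudaMallocHost -> hipHostMalloc and
// cudaFreeHost -> hipHostFree. Minimal sketch of that allocation pattern;
// `Pixel` is a placeholder type, not the project's Color.
#include <cuda_runtime.h>

struct Pixel { unsigned char r, g, b, a; };

static void framebufferSketch(unsigned width, unsigned height)
{
  Pixel* host = nullptr;     // pinned host buffer the CPU reads
  Pixel* device = nullptr;   // device buffer a render kernel would write to
  size_t bytes = sizeof(Pixel) * width * height;

  cudaMallocHost(&host, bytes);   // pinned: enables fast and asynchronous copies
  cudaMalloc(&device, bytes);

  // ... launch a render kernel writing into `device` ...
  cudaMemcpy(host, device, bytes, cudaMemcpyDeviceToHost);

  cudaFree(device);
  cudaFreeHost(host);             // pinned memory needs cudaFreeHost
}
//----------------------------------------------------------------------------------------------------------------------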
7c856e3017b6437529661bf9a667ac0dd7b82a74.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "IndexInteranlNode.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; bool *forest = NULL; hipMalloc(&forest, XSIZE*YSIZE); int base = 1; int step = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( IndexInteranlNode), dim3(gridBlock),dim3(threadBlock), 0, 0, forest,base,step); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( IndexInteranlNode), dim3(gridBlock),dim3(threadBlock), 0, 0, forest,base,step); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( IndexInteranlNode), dim3(gridBlock),dim3(threadBlock), 0, 0, forest,base,step); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7c856e3017b6437529661bf9a667ac0dd7b82a74.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "IndexInteranlNode.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; bool *forest = NULL; cudaMalloc(&forest, XSIZE*YSIZE); int base = 1; int step = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); IndexInteranlNode<<<gridBlock,threadBlock>>>(forest,base,step); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { IndexInteranlNode<<<gridBlock,threadBlock>>>(forest,base,step); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { IndexInteranlNode<<<gridBlock,threadBlock>>>(forest,base,step); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
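// --- illustrative sketch only (not part of the original sources) -------------
// Kernel launches are asynchronous, so host-side steady_clock timestamps around
// a launch loop only bracket GPU execution if a synchronization happens before
// the end time is read; otherwise they mostly measure enqueue overhead.
// cudaEvent timing sidesteps that. This is a generic sketch with a placeholder
// kernel, not a change to the harness above.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummyKernel(float* p, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] *= 2.0f;
}

static float timeKernelMs(int n, int iterations)
{
  float* d;
  cudaMalloc(&d, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  dummyKernel<<<(n + 255) / 256, 256>>>(d, n);   // warm-up launch
  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i)
    dummyKernel<<<(n + 255) / 256, 256>>>(d, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                    // wait until all launches ran

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("%d launches: %f ms total\n", iterations, ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return ms;
}
//----------------------------------------------------------------------------------------------------------------------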
0fb8be9e34518b2bb4e362b9ccea8d44891ff515.hip
// !!! This is a file automatically generated by hipify!!! // Author: Patrick Wieschollek <[email protected]> // apply PSF kernel to an image on the GPU // TODO: create batch-version of run_blur_image #include <sstream> #include <iostream> #include <hip/hip_runtime.h> #include <cudnn.h> #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::stringstream strstr; \ strstr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ throw strstr.str(); \ } \ } #define checkCUDA(expression) \ { \ hipError_t error = (expression); \ if (error != hipSuccess) { \ throw std::runtime_error(hipGetErrorString(error)); \ } \ } void run_blur_image ( int gpu_id, const float *img_h, unsigned int iH, unsigned int iW, unsigned int iC, const float *psf_h, unsigned int pH, unsigned int pW, float **output_d) { hipSetDevice(gpu_id); const int image_bytes = iC * iH * iW * sizeof(float); const int psf_bytes = pH * pW * sizeof(float); // copy to device memory float *img_d; checkCUDA(hipMalloc(&img_d, image_bytes)); checkCUDA(hipMemcpy(img_d, img_h, image_bytes, hipMemcpyHostToDevice)); float *psf_d; checkCUDA(hipMalloc(&psf_d, psf_bytes)); checkCUDA(hipMemcpy(psf_d, psf_h, psf_bytes, hipMemcpyHostToDevice)); checkCUDA(hipMalloc(&*output_d, image_bytes)); checkCUDA(hipMemset(*output_d, 0, image_bytes)); cudnnHandle_t cudnn; cudnnCreate(&cudnn); cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( input_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnFilterDescriptor_t kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor( kernel_descriptor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, pH, pW)); cudnnConvolutionDescriptor_t convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor( convolution_descriptor, (pH - 1) / 2, (pW - 1) / 2, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); cudnnTensorDescriptor_t output_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( output_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnConvolutionFwdAlgo_t convolution_algorithm; checkCUDNN(cudnnGetConvolutionForwardAlgorithm( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &convolution_algorithm)); size_t workspace_bytes{0}; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, convolution_algorithm, &workspace_bytes)); void* d_workspace{nullptr}; hipMalloc(&d_workspace, workspace_bytes); const float alpha = 1.0f, beta = 0.0f; // no NVIDIA-cudnn depthwise primitive? 
for (int i = 0; i < iC; ++i) { checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, img_d + i * iH * iW, kernel_descriptor, psf_d, convolution_descriptor, convolution_algorithm, d_workspace, workspace_bytes, &beta, output_descriptor, *output_d + i * iH * iW)); } checkCUDA(hipFree(psf_d)); checkCUDA(hipFree(img_d)); checkCUDA(hipFree(d_workspace)); cudnnDestroyTensorDescriptor(input_descriptor); cudnnDestroyTensorDescriptor(output_descriptor); cudnnDestroyFilterDescriptor(kernel_descriptor); cudnnDestroyConvolutionDescriptor(convolution_descriptor); cudnnDestroy(cudnn); } void run_blur_image_batch ( int gpu_id, const float *img_h, unsigned int iB, unsigned int iH, unsigned int iW, unsigned int iC, const float *psf_h, unsigned int pB, unsigned int pH, unsigned int pW, float **output_d) { hipSetDevice(gpu_id); const int image_bytes = iB * iC * iH * iW * sizeof(float); const int psf_bytes = pB * pH * pW * sizeof(float); // copy to device memory float *img_d; checkCUDA(hipMalloc(&img_d, image_bytes)); checkCUDA(hipMemcpy(img_d, img_h, image_bytes, hipMemcpyHostToDevice)); float *psf_d; checkCUDA(hipMalloc(&psf_d, psf_bytes)); checkCUDA(hipMemcpy(psf_d, psf_h, psf_bytes, hipMemcpyHostToDevice)); checkCUDA(hipMalloc(&*output_d, image_bytes)); checkCUDA(hipMemset(*output_d, 0, image_bytes)); cudnnHandle_t cudnn; cudnnCreate(&cudnn); cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( input_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnFilterDescriptor_t kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor( kernel_descriptor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, pH, pW)); cudnnConvolutionDescriptor_t convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor( convolution_descriptor, (pH - 1) / 2, (pW - 1) / 2, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); cudnnTensorDescriptor_t output_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( output_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnConvolutionFwdAlgo_t convolution_algorithm; checkCUDNN(cudnnGetConvolutionForwardAlgorithm( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &convolution_algorithm)); size_t workspace_bytes{0}; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, convolution_algorithm, &workspace_bytes)); void* d_workspace{nullptr}; hipMalloc(&d_workspace, workspace_bytes); const float alpha = 1.0f, beta = 0.0f; // no NVIDIA-cudnn depthwise primitive? 
for (int b = 0; b < iB; ++b) { for (int i = 0; i < iC; ++i) { checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, img_d + b * (iH * iW * iC) + i * (iH * iW), kernel_descriptor, psf_d + b * (pH * pW), convolution_descriptor, convolution_algorithm, d_workspace, workspace_bytes, &beta, output_descriptor, *output_d + b * (iH * iW * iC) + i * (iH * iW))); } } // release device memory checkCUDA(hipFree(psf_d)); checkCUDA(hipFree(img_d)); checkCUDA(hipFree(d_workspace)); cudnnDestroyTensorDescriptor(input_descriptor); cudnnDestroyTensorDescriptor(output_descriptor); cudnnDestroyFilterDescriptor(kernel_descriptor); cudnnDestroyConvolutionDescriptor(convolution_descriptor); cudnnDestroy(cudnn); }
0fb8be9e34518b2bb4e362b9ccea8d44891ff515.cu
// Author: Patrick Wieschollek <[email protected]> // apply PSF kernel to an image on the GPU // TODO: create batch-version of run_blur_image #include <sstream> #include <iostream> #include <cuda_runtime.h> #include <cudnn.h> #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::stringstream strstr; \ strstr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ throw strstr.str(); \ } \ } #define checkCUDA(expression) \ { \ cudaError_t error = (expression); \ if (error != cudaSuccess) { \ throw std::runtime_error(cudaGetErrorString(error)); \ } \ } void run_blur_image ( int gpu_id, const float *img_h, unsigned int iH, unsigned int iW, unsigned int iC, const float *psf_h, unsigned int pH, unsigned int pW, float **output_d) { cudaSetDevice(gpu_id); const int image_bytes = iC * iH * iW * sizeof(float); const int psf_bytes = pH * pW * sizeof(float); // copy to device memory float *img_d; checkCUDA(cudaMalloc(&img_d, image_bytes)); checkCUDA(cudaMemcpy(img_d, img_h, image_bytes, cudaMemcpyHostToDevice)); float *psf_d; checkCUDA(cudaMalloc(&psf_d, psf_bytes)); checkCUDA(cudaMemcpy(psf_d, psf_h, psf_bytes, cudaMemcpyHostToDevice)); checkCUDA(cudaMalloc(&*output_d, image_bytes)); checkCUDA(cudaMemset(*output_d, 0, image_bytes)); cudnnHandle_t cudnn; cudnnCreate(&cudnn); cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( input_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnFilterDescriptor_t kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor( kernel_descriptor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, pH, pW)); cudnnConvolutionDescriptor_t convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor( convolution_descriptor, (pH - 1) / 2, (pW - 1) / 2, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); cudnnTensorDescriptor_t output_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( output_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnConvolutionFwdAlgo_t convolution_algorithm; checkCUDNN(cudnnGetConvolutionForwardAlgorithm( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &convolution_algorithm)); size_t workspace_bytes{0}; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, convolution_algorithm, &workspace_bytes)); void* d_workspace{nullptr}; cudaMalloc(&d_workspace, workspace_bytes); const float alpha = 1.0f, beta = 0.0f; // no NVIDIA-cudnn depthwise primitive? 
for (int i = 0; i < iC; ++i) { checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, img_d + i * iH * iW, kernel_descriptor, psf_d, convolution_descriptor, convolution_algorithm, d_workspace, workspace_bytes, &beta, output_descriptor, *output_d + i * iH * iW)); } checkCUDA(cudaFree(psf_d)); checkCUDA(cudaFree(img_d)); checkCUDA(cudaFree(d_workspace)); cudnnDestroyTensorDescriptor(input_descriptor); cudnnDestroyTensorDescriptor(output_descriptor); cudnnDestroyFilterDescriptor(kernel_descriptor); cudnnDestroyConvolutionDescriptor(convolution_descriptor); cudnnDestroy(cudnn); } void run_blur_image_batch ( int gpu_id, const float *img_h, unsigned int iB, unsigned int iH, unsigned int iW, unsigned int iC, const float *psf_h, unsigned int pB, unsigned int pH, unsigned int pW, float **output_d) { cudaSetDevice(gpu_id); const int image_bytes = iB * iC * iH * iW * sizeof(float); const int psf_bytes = pB * pH * pW * sizeof(float); // copy to device memory float *img_d; checkCUDA(cudaMalloc(&img_d, image_bytes)); checkCUDA(cudaMemcpy(img_d, img_h, image_bytes, cudaMemcpyHostToDevice)); float *psf_d; checkCUDA(cudaMalloc(&psf_d, psf_bytes)); checkCUDA(cudaMemcpy(psf_d, psf_h, psf_bytes, cudaMemcpyHostToDevice)); checkCUDA(cudaMalloc(&*output_d, image_bytes)); checkCUDA(cudaMemset(*output_d, 0, image_bytes)); cudnnHandle_t cudnn; cudnnCreate(&cudnn); cudnnTensorDescriptor_t input_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( input_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnFilterDescriptor_t kernel_descriptor; checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor)); checkCUDNN(cudnnSetFilter4dDescriptor( kernel_descriptor, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1, 1, pH, pW)); cudnnConvolutionDescriptor_t convolution_descriptor; checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor)); checkCUDNN(cudnnSetConvolution2dDescriptor( convolution_descriptor, (pH - 1) / 2, (pW - 1) / 2, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); cudnnTensorDescriptor_t output_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor( output_descriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, iH, iW)); cudnnConvolutionFwdAlgo_t convolution_algorithm; checkCUDNN(cudnnGetConvolutionForwardAlgorithm( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &convolution_algorithm)); size_t workspace_bytes{0}; checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize( cudnn, input_descriptor, kernel_descriptor, convolution_descriptor, output_descriptor, convolution_algorithm, &workspace_bytes)); void* d_workspace{nullptr}; cudaMalloc(&d_workspace, workspace_bytes); const float alpha = 1.0f, beta = 0.0f; // no NVIDIA-cudnn depthwise primitive? 
for (int b = 0; b < iB; ++b) { for (int i = 0; i < iC; ++i) { checkCUDNN(cudnnConvolutionForward(cudnn, &alpha, input_descriptor, img_d + b * (iH * iW * iC) + i * (iH * iW), kernel_descriptor, psf_d + b * (pH * pW), convolution_descriptor, convolution_algorithm, d_workspace, workspace_bytes, &beta, output_descriptor, *output_d + b * (iH * iW * iC) + i * (iH * iW))); } } // release device memory checkCUDA(cudaFree(psf_d)); checkCUDA(cudaFree(img_d)); checkCUDA(cudaFree(d_workspace)); cudnnDestroyTensorDescriptor(input_descriptor); cudnnDestroyTensorDescriptor(output_descriptor); cudnnDestroyFilterDescriptor(kernel_descriptor); cudnnDestroyConvolutionDescriptor(convolution_descriptor); cudnnDestroy(cudnn); }
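// --- illustrative sketch only (not part of the original sources) -------------
// Both run_blur_image variants pad the convolution with (pH-1)/2 and (pW-1)/2.
// With stride 1 and dilation 1, cuDNN's output extent is
// out = (in + 2*pad - k) / stride + 1, so odd PSF sizes give out == in and the
// output tensor descriptor can reuse iH x iW. Tiny self-contained check of that
// arithmetic (no cuDNN required).
#include <cassert>
#include <cstdio>

static int convOutDim(int in, int k, int pad, int stride)
{
  return (in + 2 * pad - k) / stride + 1;
}

static void samePaddingCheck()
{
  for (int k = 1; k <= 9; k += 2) {              // odd kernel sizes only
    int pad = (k - 1) / 2;
    assert(convOutDim(128, k, pad, 1) == 128);   // output keeps the input size
  }
  printf("(k-1)/2 padding preserves the spatial size for odd kernels\n");
}
//----------------------------------------------------------------------------------------------------------------------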
e0975730f1b045d248b946b56071251e486e90a4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/cusolver_wrappers.h> #include <raft/handle.hpp> #include <raft/linalg/cholesky_r1_update.cuh> #include <raft/mr/device/allocator.hpp> #include <raft/mr/device/buffer.hpp> #include <sstream> #include <vector> #include "../test_utils.h" namespace raft { namespace linalg { template <typename math_t> class CholeskyR1Test : public ::testing::Test { protected: CholeskyR1Test() : allocator(handle.get_device_allocator()), G(allocator, handle.get_stream(), n_rows * n_rows), L(allocator, handle.get_stream(), n_rows * n_rows), L_exp(allocator, handle.get_stream(), n_rows * n_rows), devInfo(allocator, handle.get_stream(), 1), workspace(allocator, handle.get_stream()) { CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(G.data(), G_host, n_rows * n_rows, stream); // Allocate workspace solver_handle = handle.get_cusolver_dn_handle(); CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf_bufferSize( solver_handle, HIPBLAS_FILL_MODE_LOWER, n_rows, L.data(), n_rows, &Lwork)); int n_bytes = 0; // Initializing in HIPBLAS_FILL_MODE_LOWER, because that has larger workspace // requirements. raft::linalg::choleskyRank1Update(handle, L.data(), n_rows, n_rows, nullptr, &n_bytes, HIPBLAS_FILL_MODE_LOWER, stream); Lwork = ::max(Lwork * sizeof(math_t), (size_t)n_bytes); workspace.resize(Lwork, stream); } void TearDown() override { CUDA_CHECK(hipStreamDestroy(stream)); } void testR1Update() { int n = n_rows * n_rows; std::vector<hipblasFillMode_t> fillmode{HIPBLAS_FILL_MODE_LOWER, HIPBLAS_FILL_MODE_UPPER}; for (auto uplo : fillmode) { raft::copy(L.data(), G.data(), n, stream); for (int rank = 1; rank <= n_rows; rank++) { std::stringstream ss; ss << "Rank " << rank << ((uplo == HIPBLAS_FILL_MODE_LOWER) ? ", lower" : ", upper"); SCOPED_TRACE(ss.str()); // Expected solution using Cholesky factorization from scratch raft::copy(L_exp.data(), G.data(), n, stream); CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf( solver_handle, uplo, rank, L_exp.data(), n_rows, (math_t*)workspace.data(), Lwork, devInfo.data(), stream)); // Incremental Cholesky factorization using rank one updates. 
raft::linalg::choleskyRank1Update(handle, L.data(), rank, n_rows, workspace.data(), &Lwork, uplo, stream); ASSERT_TRUE(raft::devArrMatch(L_exp.data(), L.data(), n_rows * rank, raft::CompareApprox<math_t>(3e-3))); } } } void testR1Error() { raft::update_device(G.data(), G2_host, 4, stream); std::vector<hipblasFillMode_t> fillmode{HIPBLAS_FILL_MODE_LOWER, HIPBLAS_FILL_MODE_UPPER}; for (auto uplo : fillmode) { raft::copy(L.data(), G.data(), 4, stream); ASSERT_NO_THROW(raft::linalg::choleskyRank1Update( handle, L.data(), 1, 2, workspace.data(), &Lwork, uplo, stream)); ASSERT_THROW( raft::linalg::choleskyRank1Update( handle, L.data(), 2, 2, workspace.data(), &Lwork, uplo, stream), raft::exception); math_t eps = std::numeric_limits<math_t>::epsilon(); ASSERT_NO_THROW(raft::linalg::choleskyRank1Update( handle, L.data(), 2, 2, workspace.data(), &Lwork, uplo, stream, eps)); } } raft::handle_t handle; std::shared_ptr<raft::mr::device::allocator> allocator; hipsolverDnHandle_t solver_handle; hipStream_t stream; int n_rows = 4; int Lwork; math_t G_host[16] = // clang-format off {107., 1393., 1141., 91., 1393., 21132., 15689., 9539., 1141., 15689., 13103., 2889., 91., 9539., 2889., 23649.}; // clang-format on math_t G2_host[4] = {3, 4, 2, 1}; raft::mr::device::buffer<int> devInfo; raft::mr::device::buffer<math_t> G; raft::mr::device::buffer<math_t> L_exp; raft::mr::device::buffer<math_t> L; raft::mr::device::buffer<char> workspace; }; typedef ::testing::Types<float, double> FloatTypes; TYPED_TEST_CASE(CholeskyR1Test, FloatTypes); TYPED_TEST(CholeskyR1Test, update) { this->testR1Update(); } TYPED_TEST(CholeskyR1Test, throwError) { this->testR1Error(); } }; // namespace linalg }; // namespace raft
e0975730f1b045d248b946b56071251e486e90a4.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/linalg/cusolver_wrappers.h> #include <raft/handle.hpp> #include <raft/linalg/cholesky_r1_update.cuh> #include <raft/mr/device/allocator.hpp> #include <raft/mr/device/buffer.hpp> #include <sstream> #include <vector> #include "../test_utils.h" namespace raft { namespace linalg { template <typename math_t> class CholeskyR1Test : public ::testing::Test { protected: CholeskyR1Test() : allocator(handle.get_device_allocator()), G(allocator, handle.get_stream(), n_rows * n_rows), L(allocator, handle.get_stream(), n_rows * n_rows), L_exp(allocator, handle.get_stream(), n_rows * n_rows), devInfo(allocator, handle.get_stream(), 1), workspace(allocator, handle.get_stream()) { CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(G.data(), G_host, n_rows * n_rows, stream); // Allocate workspace solver_handle = handle.get_cusolver_dn_handle(); CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf_bufferSize( solver_handle, CUBLAS_FILL_MODE_LOWER, n_rows, L.data(), n_rows, &Lwork)); int n_bytes = 0; // Initializing in CUBLAS_FILL_MODE_LOWER, because that has larger workspace // requirements. raft::linalg::choleskyRank1Update(handle, L.data(), n_rows, n_rows, nullptr, &n_bytes, CUBLAS_FILL_MODE_LOWER, stream); Lwork = std::max(Lwork * sizeof(math_t), (size_t)n_bytes); workspace.resize(Lwork, stream); } void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); } void testR1Update() { int n = n_rows * n_rows; std::vector<cublasFillMode_t> fillmode{CUBLAS_FILL_MODE_LOWER, CUBLAS_FILL_MODE_UPPER}; for (auto uplo : fillmode) { raft::copy(L.data(), G.data(), n, stream); for (int rank = 1; rank <= n_rows; rank++) { std::stringstream ss; ss << "Rank " << rank << ((uplo == CUBLAS_FILL_MODE_LOWER) ? ", lower" : ", upper"); SCOPED_TRACE(ss.str()); // Expected solution using Cholesky factorization from scratch raft::copy(L_exp.data(), G.data(), n, stream); CUSOLVER_CHECK(raft::linalg::cusolverDnpotrf( solver_handle, uplo, rank, L_exp.data(), n_rows, (math_t*)workspace.data(), Lwork, devInfo.data(), stream)); // Incremental Cholesky factorization using rank one updates. 
raft::linalg::choleskyRank1Update(handle, L.data(), rank, n_rows, workspace.data(), &Lwork, uplo, stream); ASSERT_TRUE(raft::devArrMatch(L_exp.data(), L.data(), n_rows * rank, raft::CompareApprox<math_t>(3e-3))); } } } void testR1Error() { raft::update_device(G.data(), G2_host, 4, stream); std::vector<cublasFillMode_t> fillmode{CUBLAS_FILL_MODE_LOWER, CUBLAS_FILL_MODE_UPPER}; for (auto uplo : fillmode) { raft::copy(L.data(), G.data(), 4, stream); ASSERT_NO_THROW(raft::linalg::choleskyRank1Update( handle, L.data(), 1, 2, workspace.data(), &Lwork, uplo, stream)); ASSERT_THROW( raft::linalg::choleskyRank1Update( handle, L.data(), 2, 2, workspace.data(), &Lwork, uplo, stream), raft::exception); math_t eps = std::numeric_limits<math_t>::epsilon(); ASSERT_NO_THROW(raft::linalg::choleskyRank1Update( handle, L.data(), 2, 2, workspace.data(), &Lwork, uplo, stream, eps)); } } raft::handle_t handle; std::shared_ptr<raft::mr::device::allocator> allocator; cusolverDnHandle_t solver_handle; cudaStream_t stream; int n_rows = 4; int Lwork; math_t G_host[16] = // clang-format off {107., 1393., 1141., 91., 1393., 21132., 15689., 9539., 1141., 15689., 13103., 2889., 91., 9539., 2889., 23649.}; // clang-format on math_t G2_host[4] = {3, 4, 2, 1}; raft::mr::device::buffer<int> devInfo; raft::mr::device::buffer<math_t> G; raft::mr::device::buffer<math_t> L_exp; raft::mr::device::buffer<math_t> L; raft::mr::device::buffer<char> workspace; }; typedef ::testing::Types<float, double> FloatTypes; TYPED_TEST_CASE(CholeskyR1Test, FloatTypes); TYPED_TEST(CholeskyR1Test, update) { this->testR1Update(); } TYPED_TEST(CholeskyR1Test, throwError) { this->testR1Error(); } }; // namespace linalg }; // namespace raft
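// --- illustrative sketch only (not part of the original sources) -------------
// The test above checks choleskyRank1Update against a fresh potrf of the
// leading rank x rank block. The update itself is the classic bordering step:
// if the leading (k x k) block already satisfies A11 = L11 * L11^T, the new
// off-diagonal row l solves L11 * l = a by forward substitution and the new
// diagonal entry is sqrt(alpha - l.l). Plain CPU sketch of that step for a
// row-major lower factor; this is the math being exercised, not raft's code.
#include <cmath>
#include <stdexcept>
#include <vector>

// Given an n x n symmetric A and a lower factor L valid for the leading k x k
// block, fill in row k of L so the leading (k+1) x (k+1) block is factorized.
// Repeating this for k = 0..n-1 performs an incremental Cholesky factorization.
static void choleskyAddRow(const std::vector<double>& A, std::vector<double>& L,
                           int n, int k)
{
  for (int j = 0; j < k; ++j) {                  // forward substitution: L11 * l = a
    double s = A[k * n + j];
    for (int m = 0; m < j; ++m) s -= L[k * n + m] * L[j * n + m];
    L[k * n + j] = s / L[j * n + j];
  }
  double d = A[k * n + k];
  for (int m = 0; m < k; ++m) d -= L[k * n + m] * L[k * n + m];
  if (d <= 0.0)                                  // the non-positive-definite case,
    throw std::runtime_error("matrix is not positive definite");  // as in testR1Error
  L[k * n + k] = std::sqrt(d);                   // new diagonal entry
}
//----------------------------------------------------------------------------------------------------------------------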
e8bdf36b14b5282ae08331722d74687186b92f8c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "compute_shared.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int *destination_offsets = NULL; hipMalloc(&destination_offsets, XSIZE*YSIZE); const int *source_indices = NULL; hipMalloc(&source_indices, XSIZE*YSIZE); const int *out_degrees = NULL; hipMalloc(&out_degrees, XSIZE*YSIZE); const int node_count = 1; const float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( compute_shared), dim3(gridBlock),dim3(threadBlock), 0, 0, destination_offsets,source_indices,out_degrees,node_count,input,output); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( compute_shared), dim3(gridBlock),dim3(threadBlock), 0, 0, destination_offsets,source_indices,out_degrees,node_count,input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( compute_shared), dim3(gridBlock),dim3(threadBlock), 0, 0, destination_offsets,source_indices,out_degrees,node_count,input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e8bdf36b14b5282ae08331722d74687186b92f8c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "compute_shared.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int *destination_offsets = NULL; cudaMalloc(&destination_offsets, XSIZE*YSIZE); const int *source_indices = NULL; cudaMalloc(&source_indices, XSIZE*YSIZE); const int *out_degrees = NULL; cudaMalloc(&out_degrees, XSIZE*YSIZE); const int node_count = 1; const float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); compute_shared<<<gridBlock,threadBlock>>>(destination_offsets,source_indices,out_degrees,node_count,input,output); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { compute_shared<<<gridBlock,threadBlock>>>(destination_offsets,source_indices,out_degrees,node_count,input,output); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { compute_shared<<<gridBlock,threadBlock>>>(destination_offsets,source_indices,out_degrees,node_count,input,output); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
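// --- illustrative sketch only (not part of the original sources) -------------
// The benchmark harnesses above round iXSIZE/iYSIZE up with while-loops until
// they divide evenly by the block dimensions. The closed-form equivalent is
// ceiling division, which the n-body main.cu earlier in this dump already uses
// for simulationGrid. Generic helper, not a change to the harness.
#include <cuda_runtime.h>

static inline unsigned int ceilDiv(unsigned int n, unsigned int block)
{
  return (n + block - 1) / block;   // smallest block count covering n elements
}

static dim3 makeGrid(unsigned int xsize, unsigned int ysize, dim3 block)
{
  // Equivalent to growing xsize/ysize to the next multiple of the block
  // dimensions and then dividing, but without the loop.
  return dim3(ceilDiv(xsize, block.x), ceilDiv(ysize, block.y));
}
//----------------------------------------------------------------------------------------------------------------------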
cc458444eea4be40f7a8b3c935254d02f3204283.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>

__global__ void mykernel(void)
{
}

int main(void)
{
    hipLaunchKernelGGL((mykernel), dim3(1), dim3(1), 0, 0, );
    printf("Hello World!\n");
    return 0;
}
cc458444eea4be40f7a8b3c935254d02f3204283.cu
#include <stdio.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

__global__ void mykernel(void)
{
}

int main(void)
{
    mykernel<<<1, 1>>>();
    printf("Hello World!\n");
    return 0;
}
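// --- illustrative sketch only (not part of the original sources) -------------
// This .cu/.hip pair is the smallest example in this dump of how hipify
// rewrites a kernel launch: the triple-chevron syntax becomes a
// hipLaunchKernelGGL call whose extra arguments are the dynamic shared-memory
// size and the stream. Minimal CUDA snippet with the generated HIP form shown
// in comments; the kernel name is a placeholder.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void helloKernel() { }

static void launchMappingSketch()
{
  // CUDA form:
  helloKernel<<<dim3(1), dim3(1), 0, 0>>>();
  // hipify rewrites the launch above to:
  //   hipLaunchKernelGGL((helloKernel), dim3(1), dim3(1), 0, 0, );
  // where the two trailing scalars are the shared-memory bytes and the stream,
  // and the header switch is cuda_runtime.h -> hip/hip_runtime.h.
  cudaDeviceSynchronize();
  printf("launched\n");
}
//----------------------------------------------------------------------------------------------------------------------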
efa3a416ec6abbbf3c353dd250f8eb1091c557a6.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/hip/Normalization.cuh> namespace at { namespace native { std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(Tensor& output, Tensor& save_mean, Tensor& save_invstd, const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } }); }); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) { auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto input_options = self.options(); // Accumulate in higher precision if input is half/bfloat16 if (self.scalar_type() == at::ScalarType::Half || self.scalar_type() == at::ScalarType::BFloat16) { input_options = input_options.dtype(ScalarType::Float); } Tensor save_mean, save_invstd; if (train) { save_mean = at::empty({n_input}, input_options); save_invstd = at::empty({n_input}, input_options); } else { save_mean = at::empty({0}, input_options); save_invstd = at::empty({0}, input_options); } batch_norm_cuda_out(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const Tensor& weight, const Tensor& running_mean, const Tensor& running_var, const Tensor& save_mean, const Tensor& save_invstd, bool train, double epsilon, std::array<bool,3> grad_input_mask) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = 
std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } }); }); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_stats_cuda_template<scalar_t, int32_t>(self, epsilon); } else { return batch_norm_stats_cuda_template<scalar_t, int64_t>(self, epsilon); } }); }); } Tensor batch_norm_elemt_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); batch_norm_elemt_cuda_out(output, self, weight, bias, mean, invstd, epsilon); return output; } Tensor& batch_norm_elemt_cuda_out(Tensor& output, const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& mean, const Tensor& invstd, double epsilon) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_elemt", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } } }); }); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean, const Tensor& running_var, double momentum, double epsilon, int64_t count) { std::vector<int64_t> 
counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean, const Tensor& running_var, double momentum, double epsilon, const Tensor& counts) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, bool input_g, bool weight_g, bool bias_g) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_reduce", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, const Tensor& mean_dy, const Tensor& mean_dy_xmu) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || 
is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } } }); }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const Tensor& running_mean, const Tensor& running_var, double momentum) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); // <sigh> Some workloads depend on passing in half input and float stats, which is // usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this // one so it needs to support the same case, or people start to complain. bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int32_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int32_t>(self, running_mean, running_var, momentum); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int64_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int64_t>(self, running_mean, running_var, momentum); } } }); }); } } } // namespace at::native
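Every dispatch in the Normalization file above forks on cuda::detail::canUse32BitIndexMath(self): when the tensor can be addressed with 32-bit offsets, the int32_t template specialisation is chosen so index arithmetic stays in narrower registers, and the int64_t variant is kept for very large tensors. The standalone sketch below shows the same selection in isolation; fill_kernel and dispatch_fill are hypothetical names, the grid sizing is deliberately simplified, and nothing here is ATen code.

// Minimal sketch of the 32-bit vs 64-bit index-math dispatch used above.
#include <cstdint>
#include <limits>
#include <cuda_runtime.h>

template <typename index_t>
__global__ void fill_kernel(float* data, index_t n, float value) {
  index_t i = static_cast<index_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (i < n) data[i] = value;  // index arithmetic stays in index_t
}

void dispatch_fill(float* data, int64_t n, float value) {
  const int threads = 256;
  int64_t blocks = (n + threads - 1) / threads;  // simplified: assumes blocks fits the grid limit
  if (n <= std::numeric_limits<int32_t>::max()) {
    // Small enough: instantiate the 32-bit version (cheaper index math).
    fill_kernel<int32_t><<<static_cast<unsigned>(blocks), threads>>>(data, static_cast<int32_t>(n), value);
  } else {
    // Otherwise fall back to 64-bit indexing.
    fill_kernel<int64_t><<<static_cast<unsigned>(blocks), threads>>>(data, n, value);
  }
}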
efa3a416ec6abbbf3c353dd250f8eb1091c557a6.cu
#include <ATen/native/cuda/Normalization.cuh> namespace at { namespace native { std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(Tensor& output, Tensor& save_mean, Tensor& save_invstd, const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } }); }); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& running_mean, const Tensor& running_var, bool train, double momentum, double epsilon) { auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto input_options = self.options(); // Accumulate in higher precision if input is half/bfloat16 if (self.scalar_type() == at::ScalarType::Half || self.scalar_type() == at::ScalarType::BFloat16) { input_options = input_options.dtype(ScalarType::Float); } Tensor save_mean, save_invstd; if (train) { save_mean = at::empty({n_input}, input_options); save_invstd = at::empty({n_input}, input_options); } else { save_mean = at::empty({0}, input_options); save_invstd = at::empty({0}, input_options); } batch_norm_cuda_out(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const Tensor& weight, const Tensor& running_mean, const Tensor& running_var, const Tensor& save_mean, const Tensor& save_invstd, bool train, double epsilon, std::array<bool,3> grad_input_mask) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool 
is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } }); }); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_stats_cuda_template<scalar_t, int32_t>(self, epsilon); } else { return batch_norm_stats_cuda_template<scalar_t, int64_t>(self, epsilon); } }); }); } Tensor batch_norm_elemt_cuda(const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& mean, const Tensor& invstd, double epsilon) { auto output = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); batch_norm_elemt_cuda_out(output, self, weight, bias, mean, invstd, epsilon); return output; } Tensor& batch_norm_elemt_cuda_out(Tensor& output, const Tensor& self, const Tensor& weight, const Tensor& bias, const Tensor& mean, const Tensor& invstd, double epsilon) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_elemt", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } } }); }); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean, const Tensor& running_var, double momentum, double epsilon, int64_t count) { std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = 
at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const Tensor& running_mean, const Tensor& running_var, double momentum, double epsilon, const Tensor& counts) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, running_mean.scalar_type(), "batch_norm_update_stats_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, bool input_g, bool weight_g, bool bias_g) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_reduce", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const Tensor& weight, const Tensor& mean_dy, const Tensor& mean_dy_xmu) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return 
batch_norm_backward_elemt_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, mean_dy, mean_dy_xmu); } } }); }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const Tensor& running_mean, const Tensor& running_var, double momentum) { return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "batch_norm_backward", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); // <sigh> Some workloads depend on passing in half input and float stats, which is // usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this // one so it needs to support the same case, or people start to complain. bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int32_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int32_t>(self, running_mean, running_var, momentum); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int64_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int64_t>(self, running_mean, running_var, momentum); } } }); }); } } } // namespace at::native
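The is_half_float / is_bfloat16_float branches above exist so that Half or BFloat16 activations can be paired with float statistics, and batch_norm_cuda likewise allocates save_mean / save_invstd in float for reduced-precision inputs, so reductions accumulate in a wider type. The sketch below isolates that accumulate-in-float idea; acc_type_of is a hypothetical stand-in for at::acc_type, only the __half mapping is spelled out (BFloat16 would be handled the same way), and it assumes cuda_fp16.h provides the usual __half to float conversion in device code.

// Illustrative only: accumulate reduced-precision input in float.
#include <cstdio>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

template <typename scalar_t> struct acc_type_of         { using type = scalar_t; };
template <>                  struct acc_type_of<__half> { using type = float;    };

// One block computes the mean of x[0..n) in the accumulation type.
template <typename scalar_t>
__global__ void mean_kernel(const scalar_t* x, float* out, int n) {
  using acc_t = typename acc_type_of<scalar_t>::type;
  acc_t partial = 0;
  for (int i = threadIdx.x; i < n; i += blockDim.x)
    partial += static_cast<acc_t>(x[i]);           // __half input is widened to float here
  atomicAdd(out, static_cast<float>(partial) / n); // disjoint partials sum to the mean
}

int main() {
  const int n = 1 << 16;
  float *d_x = nullptr, *d_mean = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMalloc(&d_mean, sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));
  cudaMemset(d_mean, 0, sizeof(float));
  mean_kernel<float><<<1, 256>>>(d_x, d_mean, n);
  float mean = 0.f;
  cudaMemcpy(&mean, d_mean, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("mean = %f\n", mean);
  cudaFree(d_x);
  cudaFree(d_mean);
  return 0;
}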
e3493638465399a90ce2c346990a96055fd66f80.hip
// !!! This is a file automatically generated by hipify!!! // // Created by nick on 4/3/18. // #include "hip/hip_runtime.h" #include <stdio.h> #include <host_defines.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <sys/time.h> #define TILE_DIM 16 typedef enum { MAT_TL = 0, // Top left MAT_TR = 1, // Top right MAT_BL = 2, // Bottom left MAT_BR = 3 // Bottom right } MAT_POS; inline void findStart(int N, MAT_POS pos, int *i, int *j, int *i_end, int *j_end) { int isTop = (pos < MAT_BL); int isRight = (pos % 2 == 1); *i = (isTop) ? 0 : N/2; *j = (isRight) ? N/2 : 0; *i_end = (isTop) ? N/2 : N; *j_end = (isRight) ? N : N/2; } __global__ void coalescedMultiply(float *a, float *b, float *c, int N) { __shared__ float aTile[TILE_DIM][TILE_DIM], bTile[TILE_DIM][TILE_DIM]; int row = blockIdx.y * TILE_DIM + threadIdx.y; int col = blockIdx.x * TILE_DIM + threadIdx.x; float sum = 0.0f; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * N + sub * TILE_DIM + threadIdx.x; if (idx >= N*N) { aTile[threadIdx.y][threadIdx.x] = 0; } else { aTile[threadIdx.y][threadIdx.x] = a[idx]; } idx = (sub * TILE_DIM + threadIdx.y) * N + col; if (idx >= N*N) { bTile[threadIdx.y][threadIdx.x] = 0; } else { bTile[threadIdx.y][threadIdx.x] = b[idx]; } __syncthreads(); for (int k = 0; k < TILE_DIM; k++) { sum += aTile[threadIdx.y][k] * bTile[k][threadIdx.x]; } __syncthreads(); } if (row < N && col < N) c[row * N + col] = sum; } static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // gets the current time in seconds with microsecond precision double get_time() { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec + t.tv_usec * 1e-6; } void fillIdentity(int N, float *matrix, MAT_POS pos, float scalar) { int i_start, j_start, i_end, j_end; findStart(N, pos, &i_start, &j_start, &i_end, &j_end); for (int i = i_start; i < i_end; i++) { for (int j = j_start; j < j_end; j++) { matrix[i*N+j] = (i == j) ? (scalar) : 0.0f; } } } void fillZeros(int N, float *matrix, MAT_POS pos) { int i_start, j_start, i_end, j_end; findStart(N, pos, &i_start, &j_start, &i_end, &j_end); for (int i = i_start; i < i_end; i++) { for (int j = j_start; j < j_end; j++) { matrix[i*N+j] = 0.0f; } } } void fillRand(int N, float *matrix, MAT_POS pos, float scalar) { int i_start, j_start, i_end, j_end; findStart(N, pos, &i_start, &j_start, &i_end, &j_end); for (int i = i_start; i < i_end; i++) { for (int j = j_start; j < j_end; j++) { matrix[i*N+j] = scalar * (rand() / (float)RAND_MAX); } } } #define fabs(val) (val) < 0.0f ? 
(-(val)) : (val) float mat_diff(int N, float *matrix1, float *matrix2) { float diff = 0.0f; for (int ij = 0; ij < (N*N); ij++) { int i = ij / N; int j = (ij / N) % N; diff += fabs(matrix1[i*N+j] - matrix2[i*N+j]); } return diff; } void printMa(int N, float *matrix) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%.1f ", matrix[i*N+j]); } printf("\n"); } } float rothVerf_parallel(int ext) { int N = ext*2; // Configuration dim3 dimGrid(((N + TILE_DIM - 1) / TILE_DIM), ((N + TILE_DIM - 1) / TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM); srand(100); // Memory allocation float *matrix_1, *matrix_2, *result; matrix_1 = (float*)malloc(N*N*sizeof(float)); matrix_2 = (float*)malloc(N*N*sizeof(float)); result = (float*)malloc(N*N*sizeof(float)); hipEvent_t start, stop; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); HANDLE_ERROR(hipEventRecord(start, 0)); float *d_m1, *d_m2, *d_m3; HANDLE_ERROR(hipMalloc((void**)&d_m1, N*N*sizeof(float))); HANDLE_ERROR(hipMalloc((void**)&d_m2, N*N*sizeof(float))); HANDLE_ERROR(hipMalloc((void**)&d_m3, N*N*sizeof(float))); // As per serial, fill these matrices with the right values fillIdentity(N, matrix_1, MAT_TL, 1.0f); fillRand(N, matrix_1, MAT_TR, 1.0f); fillZeros(N, matrix_1, MAT_BL); fillIdentity(N, matrix_1, MAT_BR, 1.0f); fillIdentity(N, matrix_2, MAT_TL, 1.0f); fillRand(N, matrix_2, MAT_TR, 2.0f); fillZeros(N, matrix_2, MAT_BL); fillIdentity(N, matrix_2, MAT_BR, -1.0f); // Copy over the first two matrices, and set up our result matrix HANDLE_ERROR(hipMemcpy(d_m1, matrix_1, N*N*sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_m2, matrix_2, N*N*sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemset(d_m3, 0, N*N*sizeof(float))); // Do the multiplication hipLaunchKernelGGL(( coalescedMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, d_m1, d_m2, d_m3, N); HANDLE_ERROR(hipDeviceSynchronize()); // Wait for completion // So now d_m3 is our result matrix // Re-use matrix_1 for last matrix, to memcpy over to dm_1 fillIdentity(N, matrix_1, MAT_TL, 1.0f); fillRand(N, matrix_1, MAT_TR, -1.0f); fillZeros(N, matrix_1, MAT_BL); fillIdentity(N, matrix_1, MAT_BR, 1.0f); // Multiply d_m3 * d_m1 into d_m2 // But first set our result matrix to all 0s HANDLE_ERROR(hipMemset(d_m2, 0, N*N*sizeof(float))); // Now copy over the new matrix_1 HANDLE_ERROR(hipMemcpy(d_m1, matrix_1, N*N*sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( coalescedMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, d_m3, d_m1, d_m2, N); HANDLE_ERROR(hipDeviceSynchronize()); // Wait for completion // Copy over d_m2 into matrix_1 for comparison HANDLE_ERROR(hipMemcpy(matrix_1, d_m2, N*N*sizeof(float), hipMemcpyDeviceToHost)); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); float gpu_elapsed_ms; HANDLE_ERROR(hipEventElapsedTime(&gpu_elapsed_ms, start, stop)); printf("Done calculating! 
Elapsed time: %.1f ms\n", gpu_elapsed_ms); // Re-use result for RHS matrix fillIdentity(N, result, MAT_TL, 1.0f); fillZeros(N, result, MAT_TR); fillZeros(N, result, MAT_BL); fillIdentity(N, result, MAT_BR, -1.0f); //printf("LHS:\n"); //printMa(N, matrix_1); // Get the error sum float error = mat_diff(N, result, matrix_1); // Free up memory free(matrix_1); free(matrix_2); free(result); HANDLE_ERROR(hipFree(d_m1)); HANDLE_ERROR(hipFree(d_m2)); HANDLE_ERROR(hipFree(d_m3)); // Return the error sum return error; } int main(int argc, char** argv) { int N = 5000; if (argc > 1) N = atoi(argv[1]); printf("Calculating CUDA with N=%d\n", N); double start = get_time(); float err = rothVerf_parallel(N); printf("Error is %.1f in total runtime of %.2f ms\n", err, (get_time() - start) * 1000.0f); return 0; }
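One detail worth flagging in both copies of this file: in mat_diff the column index is computed as (ij / N) % N, which equals the row index i, so only diagonal entries are ever compared (each of them N times) and off-diagonal discrepancies never reach the reported error; the object-like fabs macro is also fragile (no outer parentheses, argument evaluated twice), though harmless in this particular use. A host-side reference with the presumably intended indexing, shown for comparison rather than as a patch to the file:

#include <cmath>

// Reference element-wise L1 difference between two N x N row-major matrices.
// Unlike mat_diff above, j walks the columns (ij % N) and every entry is
// visited exactly once.
float mat_diff_ref(int N, const float* a, const float* b) {
  float diff = 0.0f;
  for (int ij = 0; ij < N * N; ij++) {
    int i = ij / N;
    int j = ij % N;
    diff += std::fabs(a[i * N + j] - b[i * N + j]);
  }
  return diff;
}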
e3493638465399a90ce2c346990a96055fd66f80.cu
// // Created by nick on 4/3/18. // #include "cuda.h" #include <stdio.h> #include <host_defines.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #include <sys/time.h> #define TILE_DIM 16 typedef enum { MAT_TL = 0, // Top left MAT_TR = 1, // Top right MAT_BL = 2, // Bottom left MAT_BR = 3 // Bottom right } MAT_POS; inline void findStart(int N, MAT_POS pos, int *i, int *j, int *i_end, int *j_end) { int isTop = (pos < MAT_BL); int isRight = (pos % 2 == 1); *i = (isTop) ? 0 : N/2; *j = (isRight) ? N/2 : 0; *i_end = (isTop) ? N/2 : N; *j_end = (isRight) ? N : N/2; } __global__ void coalescedMultiply(float *a, float *b, float *c, int N) { __shared__ float aTile[TILE_DIM][TILE_DIM], bTile[TILE_DIM][TILE_DIM]; int row = blockIdx.y * TILE_DIM + threadIdx.y; int col = blockIdx.x * TILE_DIM + threadIdx.x; float sum = 0.0f; int idx; for (int sub = 0; sub < gridDim.x; ++sub) { idx = row * N + sub * TILE_DIM + threadIdx.x; if (idx >= N*N) { aTile[threadIdx.y][threadIdx.x] = 0; } else { aTile[threadIdx.y][threadIdx.x] = a[idx]; } idx = (sub * TILE_DIM + threadIdx.y) * N + col; if (idx >= N*N) { bTile[threadIdx.y][threadIdx.x] = 0; } else { bTile[threadIdx.y][threadIdx.x] = b[idx]; } __syncthreads(); for (int k = 0; k < TILE_DIM; k++) { sum += aTile[threadIdx.y][k] * bTile[k][threadIdx.x]; } __syncthreads(); } if (row < N && col < N) c[row * N + col] = sum; } static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // gets the current time in seconds with microsecond precision double get_time() { struct timeval t; gettimeofday(&t, NULL); return t.tv_sec + t.tv_usec * 1e-6; } void fillIdentity(int N, float *matrix, MAT_POS pos, float scalar) { int i_start, j_start, i_end, j_end; findStart(N, pos, &i_start, &j_start, &i_end, &j_end); for (int i = i_start; i < i_end; i++) { for (int j = j_start; j < j_end; j++) { matrix[i*N+j] = (i == j) ? (scalar) : 0.0f; } } } void fillZeros(int N, float *matrix, MAT_POS pos) { int i_start, j_start, i_end, j_end; findStart(N, pos, &i_start, &j_start, &i_end, &j_end); for (int i = i_start; i < i_end; i++) { for (int j = j_start; j < j_end; j++) { matrix[i*N+j] = 0.0f; } } } void fillRand(int N, float *matrix, MAT_POS pos, float scalar) { int i_start, j_start, i_end, j_end; findStart(N, pos, &i_start, &j_start, &i_end, &j_end); for (int i = i_start; i < i_end; i++) { for (int j = j_start; j < j_end; j++) { matrix[i*N+j] = scalar * (rand() / (float)RAND_MAX); } } } #define fabs(val) (val) < 0.0f ? 
(-(val)) : (val) float mat_diff(int N, float *matrix1, float *matrix2) { float diff = 0.0f; for (int ij = 0; ij < (N*N); ij++) { int i = ij / N; int j = (ij / N) % N; diff += fabs(matrix1[i*N+j] - matrix2[i*N+j]); } return diff; } void printMa(int N, float *matrix) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%.1f ", matrix[i*N+j]); } printf("\n"); } } float rothVerf_parallel(int ext) { int N = ext*2; // Configuration dim3 dimGrid(((N + TILE_DIM - 1) / TILE_DIM), ((N + TILE_DIM - 1) / TILE_DIM)); dim3 dimBlock(TILE_DIM, TILE_DIM); srand(100); // Memory allocation float *matrix_1, *matrix_2, *result; matrix_1 = (float*)malloc(N*N*sizeof(float)); matrix_2 = (float*)malloc(N*N*sizeof(float)); result = (float*)malloc(N*N*sizeof(float)); cudaEvent_t start, stop; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); HANDLE_ERROR(cudaEventRecord(start, 0)); float *d_m1, *d_m2, *d_m3; HANDLE_ERROR(cudaMalloc((void**)&d_m1, N*N*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&d_m2, N*N*sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&d_m3, N*N*sizeof(float))); // As per serial, fill these matrices with the right values fillIdentity(N, matrix_1, MAT_TL, 1.0f); fillRand(N, matrix_1, MAT_TR, 1.0f); fillZeros(N, matrix_1, MAT_BL); fillIdentity(N, matrix_1, MAT_BR, 1.0f); fillIdentity(N, matrix_2, MAT_TL, 1.0f); fillRand(N, matrix_2, MAT_TR, 2.0f); fillZeros(N, matrix_2, MAT_BL); fillIdentity(N, matrix_2, MAT_BR, -1.0f); // Copy over the first two matrices, and set up our result matrix HANDLE_ERROR(cudaMemcpy(d_m1, matrix_1, N*N*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_m2, matrix_2, N*N*sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemset(d_m3, 0, N*N*sizeof(float))); // Do the multiplication coalescedMultiply<<<dimGrid, dimBlock>>>(d_m1, d_m2, d_m3, N); HANDLE_ERROR(cudaDeviceSynchronize()); // Wait for completion // So now d_m3 is our result matrix // Re-use matrix_1 for last matrix, to memcpy over to dm_1 fillIdentity(N, matrix_1, MAT_TL, 1.0f); fillRand(N, matrix_1, MAT_TR, -1.0f); fillZeros(N, matrix_1, MAT_BL); fillIdentity(N, matrix_1, MAT_BR, 1.0f); // Multiply d_m3 * d_m1 into d_m2 // But first set our result matrix to all 0s HANDLE_ERROR(cudaMemset(d_m2, 0, N*N*sizeof(float))); // Now copy over the new matrix_1 HANDLE_ERROR(cudaMemcpy(d_m1, matrix_1, N*N*sizeof(float), cudaMemcpyHostToDevice)); coalescedMultiply<<<dimGrid, dimBlock>>>(d_m3, d_m1, d_m2, N); HANDLE_ERROR(cudaThreadSynchronize()); // Wait for completion // Copy over d_m2 into matrix_1 for comparison HANDLE_ERROR(cudaMemcpy(matrix_1, d_m2, N*N*sizeof(float), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); float gpu_elapsed_ms; HANDLE_ERROR(cudaEventElapsedTime(&gpu_elapsed_ms, start, stop)); printf("Done calculating! 
Elapsed time: %.1f ms\n", gpu_elapsed_ms); // Re-use result for RHS matrix fillIdentity(N, result, MAT_TL, 1.0f); fillZeros(N, result, MAT_TR); fillZeros(N, result, MAT_BL); fillIdentity(N, result, MAT_BR, -1.0f); //printf("LHS:\n"); //printMa(N, matrix_1); // Get the error sum float error = mat_diff(N, result, matrix_1); // Free up memory free(matrix_1); free(matrix_2); free(result); HANDLE_ERROR(cudaFree(d_m1)); HANDLE_ERROR(cudaFree(d_m2)); HANDLE_ERROR(cudaFree(d_m3)); // Return the error sum return error; } int main(int argc, char** argv) { int N = 5000; if (argc > 1) N = atoi(argv[1]); printf("Calculating CUDA with N=%d\n", N); double start = get_time(); float err = rothVerf_parallel(N); printf("Error is %.1f in total runtime of %.2f ms\n", err, (get_time() - start) * 1000.0f); return 0; }
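The .cu version above still ends its second multiplication with the deprecated cudaThreadSynchronize(), whereas the hipified copy uses hipDeviceSynchronize(); the result is unaffected here because the subsequent cudaMemcpy and event synchronisation order the work anyway. Below is a self-contained timing harness in the same event-based style with the non-deprecated call; dummy_kernel and the sizes are placeholders:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);
  dummy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaDeviceSynchronize();                // replaces the deprecated cudaThreadSynchronize()
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  std::printf("kernel time: %.3f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}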
c7f3bcf0301bef8cdd9b79f8390ec902db84882a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <string> #include "caffe/layers/kl_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void KlForward(const int n, const Dtype* in, const Dtype* alpha, Dtype* out){ // f(x) = e^(-alpha) * (x-1/2*th) + alpha/2 if |x| > th // = e^(-alpha) * x^2 * 1/2/th + alpha/2 if |x| <= th CUDA_KERNEL_LOOP(index, n) { Dtype x = in[index]; Dtype abs_x = abs(x); Dtype a = alpha[index]; if (abs_x > 1) { out[index] = exp(-a) * (abs_x - 0.5) + a * 0.5; } else { out[index] = exp(-a) * x * x * 0.5 + a * 0.5; } } } template <typename Dtype> void KlLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[2]->gpu_data(), diff_.mutable_gpu_data()); hipLaunchKernelGGL(( KlForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), bottom[1]->gpu_data(), error_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; normalize_divider_=bottom[0]->count(); if (has_weights_) { caffe_gpu_mul(count, bottom[3]->gpu_data(), error_.gpu_data(), error_.mutable_gpu_data()); caffe_gpu_asum(count, bottom[3]->gpu_data(), &normalize_divider_); } Dtype loss; caffe_gpu_asum(count, error_.mutable_gpu_data(), &loss); normalize_divider_+=0.00001f; top[0]->mutable_cpu_data()[0]=loss/normalize_divider_; } template <typename Dtype> __global__ void KlBackward(const int n, const Dtype* in1, const Dtype* in2, const Dtype* in3, const Dtype* in4, Dtype* out1, Dtype* out2) { // f'(xe) = e^(-alpha) * (xe - xg) if |xg - xe| <= 1 // = -e^(-alpha) if |xg - xe| > 1 and xg > xe // = e^(-alpha) if |xg - xe| > 1 and xg < xe // // f'(alpha) = -(xg - xe)^2 * 0.5 * e^(-alpha) + 0.5 if |xg - xe| <= 1 // = -(abs(xg-xe) - 0.5) * e^(-alpha) + 0.5 CUDA_KERNEL_LOOP(index, n) { Dtype d = in1[index];//xe - xg Dtype xe = in2[index]; Dtype xg = in3[index]; Dtype alpha = in4[index]; Dtype abs_d = abs(d); Dtype ea = exp(-alpha); if (abs_d <= 1) { out1[index] = ea * d; out2[index] = -d*d * 0.5 * ea + 0.5; } else { if (xg > xe) { out1[index] = -ea; } else { out1[index] = ea; } out2[index] = -(abs_d - 0.5) * ea + 0.5; } } } template <typename Dtype> void KlLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0] && propagate_down[1]){ int count = diff_.count(); Dtype* bottom_diff1 = bottom[0]->mutable_gpu_diff(); Dtype* bottom_diff2 = bottom[1]->mutable_gpu_diff(); hipLaunchKernelGGL(( KlBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, diff_.gpu_data(), bottom[0]->gpu_data(),bottom[2]->gpu_data(), bottom[1]->gpu_data(), bottom_diff1, bottom_diff2); CUDA_POST_KERNEL_CHECK; if (has_weights_) { const Dtype* label_weight = bottom[3]->gpu_data(); caffe_gpu_mul(count, label_weight, bottom[0]->gpu_diff(), bottom_diff1); caffe_gpu_mul(count, label_weight, bottom[1]->gpu_diff(), bottom_diff2); } const Dtype loss_weight = top[0]->cpu_diff()[0] / normalize_divider_; caffe_gpu_scal(count, loss_weight , bottom_diff1); caffe_gpu_scal(count, loss_weight , bottom_diff2); } } INSTANTIATE_LAYER_GPU_FUNCS(KlLossLayer); } // namespace caffe
c7f3bcf0301bef8cdd9b79f8390ec902db84882a.cu
#include <vector> #include <string> #include "caffe/layers/kl_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void KlForward(const int n, const Dtype* in, const Dtype* alpha, Dtype* out){ // f(x) = e^(-alpha) * (x-1/2*th) + alpha/2 if |x| > th // = e^(-alpha) * x^2 * 1/2/th + alpha/2 if |x| <= th CUDA_KERNEL_LOOP(index, n) { Dtype x = in[index]; Dtype abs_x = abs(x); Dtype a = alpha[index]; if (abs_x > 1) { out[index] = exp(-a) * (abs_x - 0.5) + a * 0.5; } else { out[index] = exp(-a) * x * x * 0.5 + a * 0.5; } } } template <typename Dtype> void KlLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int count = bottom[0]->count(); caffe_gpu_sub(count, bottom[0]->gpu_data(), bottom[2]->gpu_data(), diff_.mutable_gpu_data()); KlForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), bottom[1]->gpu_data(), error_.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; normalize_divider_=bottom[0]->count(); if (has_weights_) { caffe_gpu_mul(count, bottom[3]->gpu_data(), error_.gpu_data(), error_.mutable_gpu_data()); caffe_gpu_asum(count, bottom[3]->gpu_data(), &normalize_divider_); } Dtype loss; caffe_gpu_asum(count, error_.mutable_gpu_data(), &loss); normalize_divider_+=0.00001f; top[0]->mutable_cpu_data()[0]=loss/normalize_divider_; } template <typename Dtype> __global__ void KlBackward(const int n, const Dtype* in1, const Dtype* in2, const Dtype* in3, const Dtype* in4, Dtype* out1, Dtype* out2) { // f'(xe) = e^(-alpha) * (xe - xg) if |xg - xe| <= 1 // = -e^(-alpha) if |xg - xe| > 1 and xg > xe // = e^(-alpha) if |xg - xe| > 1 and xg < xe // // f'(alpha) = -(xg - xe)^2 * 0.5 * e^(-alpha) + 0.5 if |xg - xe| <= 1 // = -(abs(xg-xe) - 0.5) * e^(-alpha) + 0.5 CUDA_KERNEL_LOOP(index, n) { Dtype d = in1[index];//xe - xg Dtype xe = in2[index]; Dtype xg = in3[index]; Dtype alpha = in4[index]; Dtype abs_d = abs(d); Dtype ea = exp(-alpha); if (abs_d <= 1) { out1[index] = ea * d; out2[index] = -d*d * 0.5 * ea + 0.5; } else { if (xg > xe) { out1[index] = -ea; } else { out1[index] = ea; } out2[index] = -(abs_d - 0.5) * ea + 0.5; } } } template <typename Dtype> void KlLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0] && propagate_down[1]){ int count = diff_.count(); Dtype* bottom_diff1 = bottom[0]->mutable_gpu_diff(); Dtype* bottom_diff2 = bottom[1]->mutable_gpu_diff(); KlBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, diff_.gpu_data(), bottom[0]->gpu_data(),bottom[2]->gpu_data(), bottom[1]->gpu_data(), bottom_diff1, bottom_diff2); CUDA_POST_KERNEL_CHECK; if (has_weights_) { const Dtype* label_weight = bottom[3]->gpu_data(); caffe_gpu_mul(count, label_weight, bottom[0]->gpu_diff(), bottom_diff1); caffe_gpu_mul(count, label_weight, bottom[1]->gpu_diff(), bottom_diff2); } const Dtype loss_weight = top[0]->cpu_diff()[0] / normalize_divider_; caffe_gpu_scal(count, loss_weight , bottom_diff1); caffe_gpu_scal(count, loss_weight , bottom_diff2); } } INSTANTIATE_LAYER_GPU_FUNCS(KlLossLayer); } // namespace caffe
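KlForward and KlBackward implement a smooth-L1-style regression loss attenuated by a per-element alpha (the residual term is scaled by exp(-alpha) and an alpha/2 penalty is added), and the derivative comments in the kernels spell out the gradients. A plain host reference of the same formulas is convenient for gradient-checking the layer; the sketch below mirrors the kernel arithmetic, with kl_forward_ref and kl_backward_ref as illustrative names and d standing for xe - xg:

#include <cmath>

// (d_xe, d_alpha) correspond to out1 and out2 in KlBackward above.
struct KlGrads { float d_xe; float d_alpha; };

float kl_forward_ref(float xe, float xg, float alpha) {
  float d = xe - xg, ad = std::fabs(d);
  if (ad > 1.0f) return std::exp(-alpha) * (ad - 0.5f) + 0.5f * alpha;  // linear branch
  return std::exp(-alpha) * d * d * 0.5f + 0.5f * alpha;               // quadratic branch
}

KlGrads kl_backward_ref(float xe, float xg, float alpha) {
  float d = xe - xg, ad = std::fabs(d), ea = std::exp(-alpha);
  if (ad <= 1.0f) return { ea * d, -d * d * 0.5f * ea + 0.5f };
  return { (xg > xe) ? -ea : ea, -(ad - 0.5f) * ea + 0.5f };
}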
cc3421f43158c36914eb17f5b3bea6bc931ebe12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by Tuowen Zhao on 2/17/19. // #include "stencils/fake.h" #include "stencils/stencils.h" #include <brick-cuda.h> #include <brick-mpi.h> #include <brick.h> #include <bricksetup.h> #include <iostream> #include <mpi.h> #include "bitset.h" #include "stencils/cudaarray.h" #include "stencils/cudavfold.h" #include <brickcompare.h> #include <multiarray.h> #include <array-mpi.h> #include <unistd.h> #include "args.h" typedef Brick<Dim<BDIM>, Dim<VFOLD>> Brick3D; __global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, unsigned *stride) { long k = PADDING + blockIdx.z * TILE + threadIdx.z; long j = PADDING + blockIdx.y * TILE + threadIdx.y; long i = PADDING + blockIdx.x * TILE + threadIdx.x; long pos = i + j * stride[1] + k * stride[2]; ST_GPU; } __global__ void brick_kernel(unsigned *grid, Brick3D in, Brick3D out, unsigned *stride) { unsigned bk = blockIdx.z; unsigned bj = blockIdx.y; unsigned bi = blockIdx.x; unsigned b = grid[bi + (bj + bk * stride[1]) * stride[0]]; brick(ST_SCRTPT, VSVEC, (BDIM), (VFOLD), b); } int main(int argc, char **argv) { MPI_ITER = 100; int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided); if (provided != MPI_THREAD_SERIALIZED) { MPI_Finalize(); return 1; } MPI_Comm cart = parseArgs(argc, argv, "cuda"); if (cart != MPI_COMM_NULL) { int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MEMFD::setup_prefix("mpi-main", rank); int prd[3] = {1, 1, 1}; int coo[3]; MPI_Cart_get(cart, 3, (int *)dim_size.data(), prd, coo); std::vector<long> stride(3), strideb(3), strideg(3); for (int i = 0; i < 3; ++i) { stride[i] = dom_size[i] + 2 * TILE + 2 * GZ; strideg[i] = dom_size[i] + 2 * TILE; strideb[i] = strideg[i] / TILE; } bElem *in_ptr = randomArray(stride); BrickDecomp<3, BDIM> bDecomp(dom_size, GZ); bDecomp.comm = cart; populate(cart, bDecomp, 0, 1, coo); auto bSize = cal_size<BDIM>::value; bDecomp.initialize(skin3d_good); BrickInfo<3> bInfo = bDecomp.getBrickInfo(); #ifdef DECOMP_PAGEUNALIGN auto bStorage = bInfo.allocate(bSize); auto bStorageOut = bInfo.allocate(bSize); #else auto bStorage = bInfo.mmap_alloc(bSize); auto bStorageOut = bInfo.mmap_alloc(bSize); #endif auto grid_ptr = (unsigned *)malloc(sizeof(unsigned) * strideb[2] * strideb[1] * strideb[0]); auto grid = (unsigned(*)[strideb[1]][strideb[0]])grid_ptr; for (long k = 0; k < strideb[2]; ++k) for (long j = 0; j < strideb[1]; ++j) for (long i = 0; i < strideb[0]; ++i) grid[k][j][i] = bDecomp[k][j][i]; for (long k = 1; k < strideb[2] - 1; ++k) for (long j = 1; j < strideb[1] - 1; ++j) for (long i = 1; i < strideb[0] - 1; ++i) { auto l = grid[k][j][i]; for (long id = 0; id < 27; ++id) if (bInfo.adj[bInfo.adj[l][id]][26 - id] != l) throw std::runtime_error("err"); } Brick3D bIn(&bInfo, bStorage, 0); Brick3D bOut(&bInfo, bStorageOut, 0); copyToBrick<3>(strideg, {PADDING, PADDING, PADDING}, {0, 0, 0}, in_ptr, grid_ptr, bIn); bElem *out_ptr = zeroArray({stride[0], stride[1], stride[2]}); unsigned *arr_stride_dev = nullptr; { unsigned arr_stride_tmp[3]; unsigned s = 1; for (int i = 0; i < 3; ++i) { arr_stride_tmp[i] = s; s *= stride[i]; } copyToDevice({3}, arr_stride_dev, arr_stride_tmp); } bElem *in_ptr_dev = nullptr; bElem *out_ptr_dev = nullptr; copyToDevice(stride, in_ptr_dev, in_ptr); copyToDevice(stride, out_ptr_dev, out_ptr); size_t tsize = 0; for (auto &g : bDecomp.ghost) tsize += g.len * bStorage.step * sizeof(bElem) * 2; std::unordered_map<uint64_t, 
MPI_Datatype> stypemap; std::unordered_map<uint64_t, MPI_Datatype> rtypemap; exchangeArrPrepareTypes<3>(stypemap, rtypemap, {dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING}, {GZ, GZ, GZ}); auto arr_func = [&]() -> void { float elapsed; hipEvent_t c_0, c_1; hipEventCreate(&c_0); hipEventCreate(&c_1); #if !defined(CUDA_AWARE) || !defined(USE_TYPES) // Copy everything back from device double st = omp_get_wtime(); copyFromDevice(stride, in_ptr, in_ptr_dev); movetime += omp_get_wtime() - st; exchangeArr<3>(in_ptr, cart, bDecomp.rank_map, {dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING}, {GZ, GZ, GZ}); st = omp_get_wtime(); copyToDevice(stride, in_ptr_dev, in_ptr); movetime += omp_get_wtime() - st; #else exchangeArrTypes<3>(in_ptr_dev, cart, bDecomp.rank_map, stypemap, rtypemap); #endif hipEventRecord(c_0); dim3 block(strideb[0], strideb[1], strideb[2]), thread(TILE, TILE, TILE); for (int i = 0; i < ST_ITER / 2; ++i) { hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread), 0, 0, in_ptr_dev, out_ptr_dev, arr_stride_dev); hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread), 0, 0, out_ptr_dev, in_ptr_dev, arr_stride_dev); } hipEventRecord(c_1); hipEventSynchronize(c_1); hipEventElapsedTime(&elapsed, c_0, c_1); calctime += elapsed / 1000.0; }; if (rank == 0) std::cout << "d3pt7 MPI decomp" << std::endl; int cnt; double total; total = time_mpi(arr_func, cnt, bDecomp); cnt *= ST_ITER; // Copy back copyFromDevice(stride, out_ptr, out_ptr_dev); hipFree(in_ptr_dev); hipFree(out_ptr_dev); hipFree(arr_stride_dev); { mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD); mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD); mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD); mpi_stats mspd_s = mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD); mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD); mpi_stats pack_s = mpi_statistics(packtime / cnt, MPI_COMM_WORLD); mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD); if (rank == 0) { total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg + pack_s.avg; std::cout << "Arr: " << total << std::endl; std::cout << "calc " << calc_s << std::endl; std::cout << "pack " << pack_s << std::endl; std::cout << "move " << move_s << std::endl; std::cout << "call " << call_s << std::endl; std::cout << "wait " << wait_s << std::endl; std::cout << " | MPI size (MB): " << size_s << std::endl; std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl; double perf = (double)tot_elems * 1.0e-9; perf = perf / total; std::cout << "perf " << perf << " GStencil/s" << std::endl; std::cout << std::endl; } } // setup brick on device BrickInfo<3> *bInfo_dev; auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice); { unsigned size = sizeof(BrickInfo<3>); hipMalloc(&bInfo_dev, size); hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice); } BrickStorage bStorage_dev = movBrickStorage(bStorage, hipMemcpyHostToDevice); BrickStorage bStorageOut_dev = movBrickStorage(bStorageOut, hipMemcpyHostToDevice); Brick3D bIn_dev(bInfo_dev, bStorage_dev, 0); Brick3D bOut_dev(bInfo_dev, bStorageOut_dev, 0); unsigned *grid_dev_ptr = nullptr; copyToDevice(strideb, grid_dev_ptr, grid_ptr); unsigned *grid_stride_dev = nullptr; { unsigned grid_stride_tmp[3]; for (int i = 0; i < 3; ++i) grid_stride_tmp[i] = strideb[i]; copyToDevice({3}, grid_stride_dev, grid_stride_tmp); } #ifndef DECOMP_PAGEUNALIGN ExchangeView ev = 
bDecomp.exchangeView(bStorage); #endif auto brick_func = [&]() -> void { float elapsed; hipEvent_t c_0, c_1; hipEventCreate(&c_0); hipEventCreate(&c_1); #ifndef CUDA_AWARE { double t_a = omp_get_wtime(); hipMemcpy(bStorage.dat.get() + bStorage.step * bDecomp.sep_pos[0], bStorage_dev.dat.get() + bStorage.step * bDecomp.sep_pos[0], bStorage.step * (bDecomp.sep_pos[1] - bDecomp.sep_pos[0]) * sizeof(bElem), hipMemcpyDeviceToHost); double t_b = omp_get_wtime(); movetime += t_b - t_a; #ifdef DECOMP_PAGEUNALIGN bDecomp.exchange(bStorage); #else ev.exchange(); #endif t_a = omp_get_wtime(); hipMemcpy(bStorage_dev.dat.get() + bStorage.step * bDecomp.sep_pos[1], bStorage.dat.get() + bStorage.step * bDecomp.sep_pos[1], bStorage.step * (bDecomp.sep_pos[2] - bDecomp.sep_pos[1]) * sizeof(bElem), hipMemcpyHostToDevice); t_b = omp_get_wtime(); movetime += t_b - t_a; } #else bDecomp.exchange(bStorage_dev); #endif dim3 block(strideb[0], strideb[1], strideb[2]), thread(32); hipEventRecord(c_0); for (int i = 0; i < ST_ITER / 2; ++i) { hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread), 0, 0, grid_dev_ptr, bIn_dev, bOut_dev, grid_stride_dev); hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread), 0, 0, grid_dev_ptr, bOut_dev, bIn_dev, grid_stride_dev); } hipEventRecord(c_1); hipEventSynchronize(c_1); hipEventElapsedTime(&elapsed, c_0, c_1); calctime += elapsed / 1000.0; }; total = time_mpi(brick_func, cnt, bDecomp); cnt *= ST_ITER; // Copy back hipMemcpy(bStorageOut.dat.get(), bStorageOut_dev.dat.get(), bStorageOut.step * bStorageOut.chunks * sizeof(bElem), hipMemcpyDeviceToHost); { mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD); mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD); mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD); mpi_stats mspd_s = mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD); mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD); mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD); if (rank == 0) { total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg; std::cout << "Bri: " << total << std::endl; std::cout << "calc " << calc_s << std::endl; std::cout << "move " << move_s << std::endl; std::cout << "call " << call_s << std::endl; std::cout << "wait " << wait_s << std::endl; std::cout << " | MPI size (MB): " << size_s << std::endl; std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl; double perf = (double)tot_elems * 1.0e-9; perf = perf / total; std::cout << "perf " << perf << " GStencil/s" << std::endl; } } if (!compareBrick<3>({dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING}, {GZ, GZ, GZ}, out_ptr, grid_ptr, bOut)) std::cout << "result mismatch!" << std::endl; free(bInfo.adj); free(out_ptr); free(in_ptr); #ifndef DECOMP_PAGEUNALIGN ((MEMFD *)bStorage.mmap_info)->cleanup(); ((MEMFD *)bStorageOut.mmap_info)->cleanup(); #endif } MPI_Finalize(); return 0; }
cc3421f43158c36914eb17f5b3bea6bc931ebe12.cu
// // Created by Tuowen Zhao on 2/17/19. // #include "stencils/fake.h" #include "stencils/stencils.h" #include <brick-cuda.h> #include <brick-mpi.h> #include <brick.h> #include <bricksetup.h> #include <iostream> #include <mpi.h> #include "bitset.h" #include "stencils/cudaarray.h" #include "stencils/cudavfold.h" #include <brickcompare.h> #include <multiarray.h> #include <array-mpi.h> #include <unistd.h> #include "args.h" typedef Brick<Dim<BDIM>, Dim<VFOLD>> Brick3D; __global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, unsigned *stride) { long k = PADDING + blockIdx.z * TILE + threadIdx.z; long j = PADDING + blockIdx.y * TILE + threadIdx.y; long i = PADDING + blockIdx.x * TILE + threadIdx.x; long pos = i + j * stride[1] + k * stride[2]; ST_GPU; } __global__ void brick_kernel(unsigned *grid, Brick3D in, Brick3D out, unsigned *stride) { unsigned bk = blockIdx.z; unsigned bj = blockIdx.y; unsigned bi = blockIdx.x; unsigned b = grid[bi + (bj + bk * stride[1]) * stride[0]]; brick(ST_SCRTPT, VSVEC, (BDIM), (VFOLD), b); } int main(int argc, char **argv) { MPI_ITER = 100; int provided; MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided); if (provided != MPI_THREAD_SERIALIZED) { MPI_Finalize(); return 1; } MPI_Comm cart = parseArgs(argc, argv, "cuda"); if (cart != MPI_COMM_NULL) { int size, rank; MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MEMFD::setup_prefix("mpi-main", rank); int prd[3] = {1, 1, 1}; int coo[3]; MPI_Cart_get(cart, 3, (int *)dim_size.data(), prd, coo); std::vector<long> stride(3), strideb(3), strideg(3); for (int i = 0; i < 3; ++i) { stride[i] = dom_size[i] + 2 * TILE + 2 * GZ; strideg[i] = dom_size[i] + 2 * TILE; strideb[i] = strideg[i] / TILE; } bElem *in_ptr = randomArray(stride); BrickDecomp<3, BDIM> bDecomp(dom_size, GZ); bDecomp.comm = cart; populate(cart, bDecomp, 0, 1, coo); auto bSize = cal_size<BDIM>::value; bDecomp.initialize(skin3d_good); BrickInfo<3> bInfo = bDecomp.getBrickInfo(); #ifdef DECOMP_PAGEUNALIGN auto bStorage = bInfo.allocate(bSize); auto bStorageOut = bInfo.allocate(bSize); #else auto bStorage = bInfo.mmap_alloc(bSize); auto bStorageOut = bInfo.mmap_alloc(bSize); #endif auto grid_ptr = (unsigned *)malloc(sizeof(unsigned) * strideb[2] * strideb[1] * strideb[0]); auto grid = (unsigned(*)[strideb[1]][strideb[0]])grid_ptr; for (long k = 0; k < strideb[2]; ++k) for (long j = 0; j < strideb[1]; ++j) for (long i = 0; i < strideb[0]; ++i) grid[k][j][i] = bDecomp[k][j][i]; for (long k = 1; k < strideb[2] - 1; ++k) for (long j = 1; j < strideb[1] - 1; ++j) for (long i = 1; i < strideb[0] - 1; ++i) { auto l = grid[k][j][i]; for (long id = 0; id < 27; ++id) if (bInfo.adj[bInfo.adj[l][id]][26 - id] != l) throw std::runtime_error("err"); } Brick3D bIn(&bInfo, bStorage, 0); Brick3D bOut(&bInfo, bStorageOut, 0); copyToBrick<3>(strideg, {PADDING, PADDING, PADDING}, {0, 0, 0}, in_ptr, grid_ptr, bIn); bElem *out_ptr = zeroArray({stride[0], stride[1], stride[2]}); unsigned *arr_stride_dev = nullptr; { unsigned arr_stride_tmp[3]; unsigned s = 1; for (int i = 0; i < 3; ++i) { arr_stride_tmp[i] = s; s *= stride[i]; } copyToDevice({3}, arr_stride_dev, arr_stride_tmp); } bElem *in_ptr_dev = nullptr; bElem *out_ptr_dev = nullptr; copyToDevice(stride, in_ptr_dev, in_ptr); copyToDevice(stride, out_ptr_dev, out_ptr); size_t tsize = 0; for (auto &g : bDecomp.ghost) tsize += g.len * bStorage.step * sizeof(bElem) * 2; std::unordered_map<uint64_t, MPI_Datatype> stypemap; std::unordered_map<uint64_t, MPI_Datatype> rtypemap; 
exchangeArrPrepareTypes<3>(stypemap, rtypemap, {dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING}, {GZ, GZ, GZ}); auto arr_func = [&]() -> void { float elapsed; cudaEvent_t c_0, c_1; cudaEventCreate(&c_0); cudaEventCreate(&c_1); #if !defined(CUDA_AWARE) || !defined(USE_TYPES) // Copy everything back from device double st = omp_get_wtime(); copyFromDevice(stride, in_ptr, in_ptr_dev); movetime += omp_get_wtime() - st; exchangeArr<3>(in_ptr, cart, bDecomp.rank_map, {dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING}, {GZ, GZ, GZ}); st = omp_get_wtime(); copyToDevice(stride, in_ptr_dev, in_ptr); movetime += omp_get_wtime() - st; #else exchangeArrTypes<3>(in_ptr_dev, cart, bDecomp.rank_map, stypemap, rtypemap); #endif cudaEventRecord(c_0); dim3 block(strideb[0], strideb[1], strideb[2]), thread(TILE, TILE, TILE); for (int i = 0; i < ST_ITER / 2; ++i) { arr_kernel<<<block, thread>>>(in_ptr_dev, out_ptr_dev, arr_stride_dev); arr_kernel<<<block, thread>>>(out_ptr_dev, in_ptr_dev, arr_stride_dev); } cudaEventRecord(c_1); cudaEventSynchronize(c_1); cudaEventElapsedTime(&elapsed, c_0, c_1); calctime += elapsed / 1000.0; }; if (rank == 0) std::cout << "d3pt7 MPI decomp" << std::endl; int cnt; double total; total = time_mpi(arr_func, cnt, bDecomp); cnt *= ST_ITER; // Copy back copyFromDevice(stride, out_ptr, out_ptr_dev); cudaFree(in_ptr_dev); cudaFree(out_ptr_dev); cudaFree(arr_stride_dev); { mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD); mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD); mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD); mpi_stats mspd_s = mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD); mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD); mpi_stats pack_s = mpi_statistics(packtime / cnt, MPI_COMM_WORLD); mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD); if (rank == 0) { total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg + pack_s.avg; std::cout << "Arr: " << total << std::endl; std::cout << "calc " << calc_s << std::endl; std::cout << "pack " << pack_s << std::endl; std::cout << "move " << move_s << std::endl; std::cout << "call " << call_s << std::endl; std::cout << "wait " << wait_s << std::endl; std::cout << " | MPI size (MB): " << size_s << std::endl; std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl; double perf = (double)tot_elems * 1.0e-9; perf = perf / total; std::cout << "perf " << perf << " GStencil/s" << std::endl; std::cout << std::endl; } } // setup brick on device BrickInfo<3> *bInfo_dev; auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice); { unsigned size = sizeof(BrickInfo<3>); cudaMalloc(&bInfo_dev, size); cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice); } BrickStorage bStorage_dev = movBrickStorage(bStorage, cudaMemcpyHostToDevice); BrickStorage bStorageOut_dev = movBrickStorage(bStorageOut, cudaMemcpyHostToDevice); Brick3D bIn_dev(bInfo_dev, bStorage_dev, 0); Brick3D bOut_dev(bInfo_dev, bStorageOut_dev, 0); unsigned *grid_dev_ptr = nullptr; copyToDevice(strideb, grid_dev_ptr, grid_ptr); unsigned *grid_stride_dev = nullptr; { unsigned grid_stride_tmp[3]; for (int i = 0; i < 3; ++i) grid_stride_tmp[i] = strideb[i]; copyToDevice({3}, grid_stride_dev, grid_stride_tmp); } #ifndef DECOMP_PAGEUNALIGN ExchangeView ev = bDecomp.exchangeView(bStorage); #endif auto brick_func = [&]() -> void { float elapsed; cudaEvent_t c_0, c_1; cudaEventCreate(&c_0); cudaEventCreate(&c_1); #ifndef 
CUDA_AWARE { double t_a = omp_get_wtime(); cudaMemcpy(bStorage.dat.get() + bStorage.step * bDecomp.sep_pos[0], bStorage_dev.dat.get() + bStorage.step * bDecomp.sep_pos[0], bStorage.step * (bDecomp.sep_pos[1] - bDecomp.sep_pos[0]) * sizeof(bElem), cudaMemcpyDeviceToHost); double t_b = omp_get_wtime(); movetime += t_b - t_a; #ifdef DECOMP_PAGEUNALIGN bDecomp.exchange(bStorage); #else ev.exchange(); #endif t_a = omp_get_wtime(); cudaMemcpy(bStorage_dev.dat.get() + bStorage.step * bDecomp.sep_pos[1], bStorage.dat.get() + bStorage.step * bDecomp.sep_pos[1], bStorage.step * (bDecomp.sep_pos[2] - bDecomp.sep_pos[1]) * sizeof(bElem), cudaMemcpyHostToDevice); t_b = omp_get_wtime(); movetime += t_b - t_a; } #else bDecomp.exchange(bStorage_dev); #endif dim3 block(strideb[0], strideb[1], strideb[2]), thread(32); cudaEventRecord(c_0); for (int i = 0; i < ST_ITER / 2; ++i) { brick_kernel<<<block, thread>>>(grid_dev_ptr, bIn_dev, bOut_dev, grid_stride_dev); brick_kernel<<<block, thread>>>(grid_dev_ptr, bOut_dev, bIn_dev, grid_stride_dev); } cudaEventRecord(c_1); cudaEventSynchronize(c_1); cudaEventElapsedTime(&elapsed, c_0, c_1); calctime += elapsed / 1000.0; }; total = time_mpi(brick_func, cnt, bDecomp); cnt *= ST_ITER; // Copy back cudaMemcpy(bStorageOut.dat.get(), bStorageOut_dev.dat.get(), bStorageOut.step * bStorageOut.chunks * sizeof(bElem), cudaMemcpyDeviceToHost); { mpi_stats calc_s = mpi_statistics(calctime / cnt, MPI_COMM_WORLD); mpi_stats call_s = mpi_statistics(calltime / cnt, MPI_COMM_WORLD); mpi_stats wait_s = mpi_statistics(waittime / cnt, MPI_COMM_WORLD); mpi_stats mspd_s = mpi_statistics(tsize / 1.0e9 / (calltime + waittime) * cnt, MPI_COMM_WORLD); mpi_stats size_s = mpi_statistics((double)tsize * 1.0e-6, MPI_COMM_WORLD); mpi_stats move_s = mpi_statistics(movetime / cnt, MPI_COMM_WORLD); if (rank == 0) { total = calc_s.avg + call_s.avg + wait_s.avg + move_s.avg; std::cout << "Bri: " << total << std::endl; std::cout << "calc " << calc_s << std::endl; std::cout << "move " << move_s << std::endl; std::cout << "call " << call_s << std::endl; std::cout << "wait " << wait_s << std::endl; std::cout << " | MPI size (MB): " << size_s << std::endl; std::cout << " | MPI speed (GB/s): " << mspd_s << std::endl; double perf = (double)tot_elems * 1.0e-9; perf = perf / total; std::cout << "perf " << perf << " GStencil/s" << std::endl; } } if (!compareBrick<3>({dom_size[0], dom_size[1], dom_size[2]}, {PADDING, PADDING, PADDING}, {GZ, GZ, GZ}, out_ptr, grid_ptr, bOut)) std::cout << "result mismatch!" << std::endl; free(bInfo.adj); free(out_ptr); free(in_ptr); #ifndef DECOMP_PAGEUNALIGN ((MEMFD *)bStorage.mmap_info)->cleanup(); ((MEMFD *)bStorageOut.mmap_info)->cleanup(); #endif } MPI_Finalize(); return 0; }
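Both benchmark loops above pick between a CUDA-aware exchange, where device pointers are handed straight to MPI when CUDA_AWARE is defined, and a staged exchange that copies the ghost region to the host, exchanges, and copies back while charging the copies to movetime. The sketch below reduces that decision to a single halo swap; the neighbour ranks, element count, tags, and buffers are placeholders and the element type is assumed to be double:

#include <mpi.h>
#include <cuda_runtime.h>

void exchange_halo(double* d_send, double* d_recv, double* h_send, double* h_recv,
                   int count, int left, int right, MPI_Comm comm) {
#ifdef CUDA_AWARE
  // A CUDA-aware MPI library can read and write device memory directly.
  MPI_Sendrecv(d_send, count, MPI_DOUBLE, right, 0,
               d_recv, count, MPI_DOUBLE, left,  0, comm, MPI_STATUS_IGNORE);
#else
  // Otherwise stage through host buffers around the MPI call, as brick_func
  // does with its cudaMemcpy calls around bDecomp.exchange().
  cudaMemcpy(h_send, d_send, count * sizeof(double), cudaMemcpyDeviceToHost);
  MPI_Sendrecv(h_send, count, MPI_DOUBLE, right, 0,
               h_recv, count, MPI_DOUBLE, left,  0, comm, MPI_STATUS_IGNORE);
  cudaMemcpy(d_recv, h_recv, count * sizeof(double), cudaMemcpyHostToDevice);
#endif
}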
36a041e7427ac0ed39cc2cb8e90268eacc339914.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** High Performance Computing (special course) radix_sort.cu Location: Technical University of Denmark Purpose: Uses GPU to sort series of unsigned integers using Radix Sort EDIT: This radix sort has been modified to take multiple arrays. We need to sort three arrays by some specific array. The radix sort is modified accordingly. @author Elias Obeid @author Alexander Johansen @version 1.0 16/01/2016 */ #include "radix_sort.h" /** Populates array with 1/0 depending on Least Significant Bit is set. If LSB is 0 then index is set to 1, otherwise 0. @param d_predicate Output array to be filled with values (predicates) @param d_sort_by Values to run through @param NUM_ELEMS Number of elements in arrays @param i Used to calculate how much to shift to find the correct LSB */ __global__ void predicate_kernel(unsigned int* const, const unsigned int* const, const size_t, const unsigned int); /** Performs one iteration of Hillis and Steele scan. Inclusive sum scan. @param d_out Output array with summed values @param d_in Values to sum @param step Amount to look back in d_in @param NUM_ELEMS Number of elements in arrays */ __global__ void inclusive_sum_scan_kernel(unsigned int* const, const unsigned int* const, const int, const size_t); /** Shifts all elements to the right. Sets first index to 0. @param d_out Output array @param d_in Array to be shifted @param NUM_ELEMS Number of elements in arrays */ __global__ void right_shift_array_kernel(unsigned int* const, const unsigned int* const, const size_t); /** Toggle array with values 1 and 0. @param d_out Array with toggled values @param d_predicate Array with initial values @param NUM_ELEMS Number of elements in arrays */ __global__ void toggle_predicate_kernel(unsigned int* const, const unsigned int* const, const size_t); /** Adds an offset to the given array's values. @param d_out Input/Output array -- values will be added to offset @param shift Array with one element -- the offset to add @param NUM_ELEMS Number of elements in arrays */ __global__ void add_splitter_map_kernel(unsigned int* const, const unsigned int* const, const size_t); /** Runs log_2(BLOCK_SIZE) iterations of the reduce. Computes the sum of elements in d_in @param d_out Output array @param d_in Input array with values @param NUM_ELEMS Number of elements in arrays */ __global__ void reduce_kernel(unsigned int* const, unsigned int* const, const size_t); /** Maps values from d_in to d_out according to scatter addresses in d_sum_scan_0 or d_sum_scan_1. @param d_out Output array @param d_in Input array with values @param d_predicate Contains whether or not given value's LSB is 0 @param d_sum_scan_0 Scatter address for values with LSB 0 @param d_sum_scan_1 Scatter address for values with LSB 1 @param NUM_ELEMS Number of elements in arrays */ __global__ void map_kernel(unsigned int* const, unsigned int* const, unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const size_t); /** Calls reduce kernel to compute reduction. Runs log_(BLOCK_SIZE)(num_elems) times. @param d_out Output array @param d_in Input array with values @param num_elems Number of elements in arrays @param block_size Number of threads per block */ void reduce_wrapper(unsigned int* const, unsigned int* const, size_t, int); /** Computes an exclusive sum scan of scatter addresses for the given predicate array. 
@param d_out Output array with scatter addresses @param d_predicate Input array with predicates to be summed @param d_predicate_tmp Temporary array so we do not change d_predicate @param d_sum_scan Inclusive sum scan @param ARRAY_BYTES Number of bytes for arrays @param NUM_ELEMS Number of elements in arrays @param GRID_SIZE Number of blocks in one grid @param BLOCK_SIZE Number of threads in one block */ void exclusive_sum_scan(unsigned int* const, const unsigned int* const, unsigned int* const, unsigned int* const, const unsigned int, const size_t, const int, const int); /** Sort values using radix sort. @param h_input Input values to be sorted (unsigned int) @param NUM_ELEMS Number of elements in array @return Pointer to sorted array */ unsigned int* radix_sort(unsigned int*, const size_t); // Populates array with 1/0 depending on Least Significant Bit is set. __global__ void predicate_kernel(unsigned int* const d_predicate, const unsigned int* const d_sort_by, const size_t NUM_ELEMS, const unsigned int i) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_predicate[mid] = (int)(((d_sort_by[mid] & (1 << i)) >> i) == 0); } // Performs one iteration of Hillis and Steele scan. __global__ void inclusive_sum_scan_kernel(unsigned int* const d_out, const unsigned int* const d_in, const int step, const size_t NUM_ELEMS) { const int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; int toAdd = (((mid - step) < 0) ? 0 : d_in[mid - step]); d_out[mid] = d_in[mid] + toAdd; } // Shifts all elements to the right. Sets first index to 0. __global__ void right_shift_array_kernel(unsigned int* const d_out, const unsigned int* const d_in, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_out[mid] = (mid == 0) ? 0 : d_in[mid - 1]; } // Toggle array with values 1 and 0. __global__ void toggle_predicate_kernel(unsigned int* const d_out, const unsigned int* const d_predicate, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_out[mid] = ((d_predicate[mid]) ? 0 : 1); } // Adds an offset to the given array's values. __global__ void add_splitter_map_kernel(unsigned int* const d_out, const unsigned int* const shift, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_out[mid] += shift[0]; } // Computes the sum of elements in d_in __global__ void reduce_kernel(unsigned int* const d_out, unsigned int* const d_in, const size_t NUM_ELEMS) { unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tid = threadIdx.x; for (unsigned int s = blockDim.x / 2; s > 0; s >>=1) { if ((tid < s) && ((pos + s) < NUM_ELEMS)) d_in[pos] = d_in[pos] + d_in[pos + s]; __syncthreads(); } // only thread 0 writes result, as thread if ((tid == 0) && (pos < NUM_ELEMS)) d_out[blockIdx.x] = d_in[pos]; } // Maps values from d_in to d_out according to scatter addresses in d_sum_scan_0 or d_sum_scan_1. 
__global__ void map_kernel(unsigned int* const d_out_coarse, unsigned int* const d_out_bin, unsigned int* const d_out_val, const unsigned int* const d_in_coarse, const unsigned int* const d_in_bin, const unsigned int* const d_in_val, const unsigned int* const d_predicate, const unsigned int* const d_sum_scan_0, const unsigned int* const d_sum_scan_1, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; const unsigned int pos = ((d_predicate[mid]) ? d_sum_scan_0[mid] : d_sum_scan_1[mid]); // EDIT: MOVE ACCORDINGLY FOR ALL ARRAYS d_out_val[pos] = d_in_val[mid]; d_out_bin[pos] = d_in_bin[mid]; d_out_coarse[pos] = d_in_coarse[mid]; } // Calls reduce kernel to compute reduction. void reduce_wrapper(unsigned int* const d_out, unsigned int* const d_in, size_t num_elems, int block_size) { unsigned int grid_size = num_elems / block_size + 1; unsigned int* d_tmp; checkCudaErrors(hipMalloc(&d_tmp, sizeof(unsigned int) * grid_size)); checkCudaErrors(hipMemset(d_tmp, 0, sizeof(unsigned int) * grid_size)); unsigned int prev_grid_size; unsigned int remainder = 0; // recursively solving, will run approximately log base block_size times. do { hipLaunchKernelGGL(( reduce_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_tmp, d_in, num_elems); remainder = num_elems % block_size; num_elems = num_elems / block_size + remainder; // updating input to intermediate checkCudaErrors(hipMemcpy(d_in, d_tmp, sizeof(int) * grid_size, hipMemcpyDeviceToDevice)); // Updating grid_size to reflect how many blocks we now want to compute on prev_grid_size = grid_size; grid_size = num_elems / block_size + 1; // updating intermediate checkCudaErrors(hipFree(d_tmp)); checkCudaErrors(hipMalloc(&d_tmp, sizeof(int) * grid_size)); } while(num_elems > block_size); // computing rest hipLaunchKernelGGL(( reduce_kernel), dim3(1), dim3(num_elems), 0, 0, d_out, d_in, prev_grid_size); } // Computes an exclusive sum scan of scatter addresses for the given predicate array. 
void exclusive_sum_scan(unsigned int* const d_out, const unsigned int* const d_predicate, unsigned int* const d_predicate_tmp, unsigned int* const d_sum_scan, const unsigned int ARRAY_BYTES, const size_t NUM_ELEMS, const int GRID_SIZE, const int BLOCK_SIZE) { // copy predicate values to new array checkCudaErrors(hipMemcpy(d_predicate_tmp, d_predicate, ARRAY_BYTES, hipMemcpyDeviceToDevice)); // set all elements to zero checkCudaErrors(hipMemset(d_sum_scan, 0, ARRAY_BYTES)); // sum scan call for (unsigned int step = 1; step < NUM_ELEMS; step *= 2) { hipLaunchKernelGGL(( inclusive_sum_scan_kernel), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, d_sum_scan, d_predicate_tmp, step, NUM_ELEMS); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(d_predicate_tmp, d_sum_scan, ARRAY_BYTES, hipMemcpyDeviceToDevice)); } // shift to get exclusive scan checkCudaErrors(hipMemcpy(d_out, d_sum_scan, ARRAY_BYTES, hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( right_shift_array_kernel), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, d_out, d_sum_scan, NUM_ELEMS); } // Sort values using radix sort // EDIT: sort by first array in h_to_be_sorted unsigned int** radix_sort(unsigned int** h_to_be_sorted, const size_t NUM_ARRAYS_TO_SORT, const size_t NUM_ELEMS) { const int BLOCK_SIZE = 1024; const int GRID_SIZE = NUM_ELEMS / BLOCK_SIZE + 1; const unsigned int ARRAY_BYTES = sizeof(unsigned int) * NUM_ELEMS; const unsigned int BITS_PER_BYTE = 8; // host memory unsigned int** h_output = new unsigned int*[NUM_ARRAYS_TO_SORT]; unsigned int* h_out_coarse = new unsigned int[NUM_ELEMS]; unsigned int* h_out_bin = new unsigned int[NUM_ELEMS]; unsigned int* h_out_val = new unsigned int[NUM_ELEMS]; // device memory unsigned int *d_in_bin, *d_in_val, *d_sort_by, *d_map_coarse, *d_map_val, *d_map_bin, *d_predicate, *d_sum_scan, *d_predicate_tmp, *d_sum_scan_0, *d_sum_scan_1, *d_predicate_toggle, *d_reduce; checkCudaErrors(hipMalloc((void **) &d_sort_by, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_in_bin, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_in_val, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_map_coarse, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_map_val, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_map_bin, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_predicate, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_predicate_tmp, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_predicate_toggle, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_sum_scan, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_sum_scan_0, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_sum_scan_1, ARRAY_BYTES)); checkCudaErrors(hipMalloc((void **) &d_reduce, sizeof(unsigned int))); // copy host array to device checkCudaErrors(hipMemcpy(d_sort_by, h_to_be_sorted[0], ARRAY_BYTES, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_in_bin, h_to_be_sorted[1], ARRAY_BYTES, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_in_val, h_to_be_sorted[2], ARRAY_BYTES, hipMemcpyHostToDevice)); for (unsigned int i = 0; i < (BITS_PER_BYTE * sizeof(unsigned int)); i++) { // predicate is that LSB is 0 hipLaunchKernelGGL(( predicate_kernel), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, d_predicate, d_sort_by, NUM_ELEMS, i); // calculate scatter addresses from predicates exclusive_sum_scan(d_sum_scan_0, d_predicate, d_predicate_tmp, d_sum_scan, ARRAY_BYTES, NUM_ELEMS, GRID_SIZE, BLOCK_SIZE); // copy contents of predicate, so we do not change its content 
checkCudaErrors(hipMemcpy(d_predicate_tmp, d_predicate, ARRAY_BYTES, hipMemcpyDeviceToDevice)); // calculate how many elements had predicate equal to 1 reduce_wrapper(d_reduce, d_predicate_tmp, NUM_ELEMS, BLOCK_SIZE); // toggle predicate values, so we can compute scatter addresses for toggled predicates hipLaunchKernelGGL(( toggle_predicate_kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_predicate_toggle, d_predicate, NUM_ELEMS); // so we now have addresses for elements where LSB is equal to 1 exclusive_sum_scan(d_sum_scan_1, d_predicate_toggle, d_predicate_tmp, d_sum_scan, ARRAY_BYTES, NUM_ELEMS, GRID_SIZE, BLOCK_SIZE); // shift scatter addresses according to amount of elements that had LSB equal to 0 hipLaunchKernelGGL(( add_splitter_map_kernel), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_sum_scan_1, d_reduce, NUM_ELEMS); // move elements accordingly hipLaunchKernelGGL(( map_kernel), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, d_map_coarse, d_map_bin, d_map_val, d_sort_by, d_in_bin, d_in_val, d_predicate, d_sum_scan_0, d_sum_scan_1, NUM_ELEMS); // swap pointers, instead of moving elements std::swap(d_sort_by, d_map_coarse); std::swap(d_in_bin, d_map_bin); std::swap(d_in_val, d_map_val); } // copy contents back; after the final swap the sorted data lives in d_sort_by, d_in_bin and d_in_val checkCudaErrors(hipMemcpy(h_out_coarse, d_sort_by, ARRAY_BYTES, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_out_bin, d_in_bin, ARRAY_BYTES, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_out_val, d_in_val, ARRAY_BYTES, hipMemcpyDeviceToHost)); h_output[0] = h_out_coarse; h_output[1] = h_out_bin; h_output[2] = h_out_val; return h_output; }
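For reference, the exclusive_sum_scan helper above builds its scatter addresses by running the Hillis and Steele inclusive scan to convergence and then shifting the result right by one element. A minimal host-side sketch of that composition (illustrative only, not part of the generated file):

#include <cstddef>
#include <vector>

// Host-side reference for exclusive_sum_scan: inclusive Hillis-Steele scan
// (the step doubles every pass), then a right shift with a leading zero.
static std::vector<unsigned int> exclusive_scan_reference(const std::vector<unsigned int>& in)
{
    std::vector<unsigned int> cur(in), next(in.size());
    for (std::size_t step = 1; step < cur.size(); step *= 2) {
        for (std::size_t i = 0; i < cur.size(); ++i)
            next[i] = cur[i] + (i >= step ? cur[i - step] : 0u);  // inclusive_sum_scan_kernel
        cur.swap(next);                                           // ping-pong like the device memcpy
    }
    std::vector<unsigned int> out(cur.size());
    for (std::size_t i = 0; i < cur.size(); ++i)
        out[i] = (i == 0) ? 0u : cur[i - 1];                      // right_shift_array_kernel
    return out;
}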
36a041e7427ac0ed39cc2cb8e90268eacc339914.cu
/** High Performance Computing (special course) radix_sort.cu Location: Technical University of Denmark Purpose: Uses GPU to sort series of unsigned integers using Radix Sort EDIT: This radix sort has been modified to take multiple arrays. We need to sort three arrays by some specific array. The radix sort is modified accordingly. @author Elias Obeid @author Alexander Johansen @version 1.0 16/01/2016 */ #include "radix_sort.h" /** Populates array with 1/0 depending on Least Significant Bit is set. If LSB is 0 then index is set to 1, otherwise 0. @param d_predicate Output array to be filled with values (predicates) @param d_sort_by Values to run through @param NUM_ELEMS Number of elements in arrays @param i Used to calculate how much to shift to find the correct LSB */ __global__ void predicate_kernel(unsigned int* const, const unsigned int* const, const size_t, const unsigned int); /** Performs one iteration of Hillis and Steele scan. Inclusive sum scan. @param d_out Output array with summed values @param d_in Values to sum @param step Amount to look back in d_in @param NUM_ELEMS Number of elements in arrays */ __global__ void inclusive_sum_scan_kernel(unsigned int* const, const unsigned int* const, const int, const size_t); /** Shifts all elements to the right. Sets first index to 0. @param d_out Output array @param d_in Array to be shifted @param NUM_ELEMS Number of elements in arrays */ __global__ void right_shift_array_kernel(unsigned int* const, const unsigned int* const, const size_t); /** Toggle array with values 1 and 0. @param d_out Array with toggled values @param d_predicate Array with initial values @param NUM_ELEMS Number of elements in arrays */ __global__ void toggle_predicate_kernel(unsigned int* const, const unsigned int* const, const size_t); /** Adds an offset to the given array's values. @param d_out Input/Output array -- values will be added to offset @param shift Array with one element -- the offset to add @param NUM_ELEMS Number of elements in arrays */ __global__ void add_splitter_map_kernel(unsigned int* const, const unsigned int* const, const size_t); /** Runs log_2(BLOCK_SIZE) iterations of the reduce. Computes the sum of elements in d_in @param d_out Output array @param d_in Input array with values @param NUM_ELEMS Number of elements in arrays */ __global__ void reduce_kernel(unsigned int* const, unsigned int* const, const size_t); /** Maps values from d_in to d_out according to scatter addresses in d_sum_scan_0 or d_sum_scan_1. @param d_out Output array @param d_in Input array with values @param d_predicate Contains whether or not given value's LSB is 0 @param d_sum_scan_0 Scatter address for values with LSB 0 @param d_sum_scan_1 Scatter address for values with LSB 1 @param NUM_ELEMS Number of elements in arrays */ __global__ void map_kernel(unsigned int* const, unsigned int* const, unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const unsigned int* const, const size_t); /** Calls reduce kernel to compute reduction. Runs log_(BLOCK_SIZE)(num_elems) times. @param d_out Output array @param d_in Input array with values @param num_elems Number of elements in arrays @param block_size Number of threads per block */ void reduce_wrapper(unsigned int* const, unsigned int* const, size_t, int); /** Computes an exclusive sum scan of scatter addresses for the given predicate array. 
@param d_out Output array with scatter addresses @param d_predicate Input array with predicates to be summed @param d_predicate_tmp Temporary array so we do not change d_predicate @param d_sum_scan Inclusive sum scan @param ARRAY_BYTES Number of bytes for arrays @param NUM_ELEMS Number of elements in arrays @param GRID_SIZE Number of blocks in one grid @param BLOCK_SIZE Number of threads in one block */ void exclusive_sum_scan(unsigned int* const, const unsigned int* const, unsigned int* const, unsigned int* const, const unsigned int, const size_t, const int, const int); /** Sort values using radix sort. @param h_input Input values to be sorted (unsigned int) @param NUM_ELEMS Number of elements in array @return Pointer to sorted array */ unsigned int* radix_sort(unsigned int*, const size_t); // Populates array with 1/0 depending on Least Significant Bit is set. __global__ void predicate_kernel(unsigned int* const d_predicate, const unsigned int* const d_sort_by, const size_t NUM_ELEMS, const unsigned int i) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_predicate[mid] = (int)(((d_sort_by[mid] & (1 << i)) >> i) == 0); } // Performs one iteration of Hillis and Steele scan. __global__ void inclusive_sum_scan_kernel(unsigned int* const d_out, const unsigned int* const d_in, const int step, const size_t NUM_ELEMS) { const int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; int toAdd = (((mid - step) < 0) ? 0 : d_in[mid - step]); d_out[mid] = d_in[mid] + toAdd; } // Shifts all elements to the right. Sets first index to 0. __global__ void right_shift_array_kernel(unsigned int* const d_out, const unsigned int* const d_in, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_out[mid] = (mid == 0) ? 0 : d_in[mid - 1]; } // Toggle array with values 1 and 0. __global__ void toggle_predicate_kernel(unsigned int* const d_out, const unsigned int* const d_predicate, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_out[mid] = ((d_predicate[mid]) ? 0 : 1); } // Adds an offset to the given array's values. __global__ void add_splitter_map_kernel(unsigned int* const d_out, const unsigned int* const shift, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; d_out[mid] += shift[0]; } // Computes the sum of elements in d_in __global__ void reduce_kernel(unsigned int* const d_out, unsigned int* const d_in, const size_t NUM_ELEMS) { unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; unsigned int tid = threadIdx.x; for (unsigned int s = blockDim.x / 2; s > 0; s >>=1) { if ((tid < s) && ((pos + s) < NUM_ELEMS)) d_in[pos] = d_in[pos] + d_in[pos + s]; __syncthreads(); } // only thread 0 writes result, as thread if ((tid == 0) && (pos < NUM_ELEMS)) d_out[blockIdx.x] = d_in[pos]; } // Maps values from d_in to d_out according to scatter addresses in d_sum_scan_0 or d_sum_scan_1. 
__global__ void map_kernel(unsigned int* const d_out_coarse, unsigned int* const d_out_bin, unsigned int* const d_out_val, const unsigned int* const d_in_coarse, const unsigned int* const d_in_bin, const unsigned int* const d_in_val, const unsigned int* const d_predicate, const unsigned int* const d_sum_scan_0, const unsigned int* const d_sum_scan_1, const size_t NUM_ELEMS) { const unsigned int mid = threadIdx.x + blockIdx.x * blockDim.x; if (mid >= NUM_ELEMS) return; const unsigned int pos = ((d_predicate[mid]) ? d_sum_scan_0[mid] : d_sum_scan_1[mid]); // EDIT: MOVE ACCORDINGLY FOR ALL ARRAYS d_out_val[pos] = d_in_val[mid]; d_out_bin[pos] = d_in_bin[mid]; d_out_coarse[pos] = d_in_coarse[mid]; } // Calls reduce kernel to compute reduction. void reduce_wrapper(unsigned int* const d_out, unsigned int* const d_in, size_t num_elems, int block_size) { unsigned int grid_size = num_elems / block_size + 1; unsigned int* d_tmp; checkCudaErrors(cudaMalloc(&d_tmp, sizeof(unsigned int) * grid_size)); checkCudaErrors(cudaMemset(d_tmp, 0, sizeof(unsigned int) * grid_size)); unsigned int prev_grid_size; unsigned int remainder = 0; // recursively solving, will run approximately log base block_size times. do { reduce_kernel<<<grid_size, block_size>>>(d_tmp, d_in, num_elems); remainder = num_elems % block_size; num_elems = num_elems / block_size + remainder; // updating input to intermediate checkCudaErrors(cudaMemcpy(d_in, d_tmp, sizeof(int) * grid_size, cudaMemcpyDeviceToDevice)); // Updating grid_size to reflect how many blocks we now want to compute on prev_grid_size = grid_size; grid_size = num_elems / block_size + 1; // updating intermediate checkCudaErrors(cudaFree(d_tmp)); checkCudaErrors(cudaMalloc(&d_tmp, sizeof(int) * grid_size)); } while(num_elems > block_size); // computing rest reduce_kernel<<<1, num_elems>>>(d_out, d_in, prev_grid_size); } // Computes an exclusive sum scan of scatter addresses for the given predicate array. 
void exclusive_sum_scan(unsigned int* const d_out, const unsigned int* const d_predicate, unsigned int* const d_predicate_tmp, unsigned int* const d_sum_scan, const unsigned int ARRAY_BYTES, const size_t NUM_ELEMS, const int GRID_SIZE, const int BLOCK_SIZE) { // copy predicate values to new array checkCudaErrors(cudaMemcpy(d_predicate_tmp, d_predicate, ARRAY_BYTES, cudaMemcpyDeviceToDevice)); // set all elements to zero checkCudaErrors(cudaMemset(d_sum_scan, 0, ARRAY_BYTES)); // sum scan call for (unsigned int step = 1; step < NUM_ELEMS; step *= 2) { inclusive_sum_scan_kernel<<<GRID_SIZE,BLOCK_SIZE>>>(d_sum_scan, d_predicate_tmp, step, NUM_ELEMS); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(d_predicate_tmp, d_sum_scan, ARRAY_BYTES, cudaMemcpyDeviceToDevice)); } // shift to get exclusive scan checkCudaErrors(cudaMemcpy(d_out, d_sum_scan, ARRAY_BYTES, cudaMemcpyDeviceToDevice)); right_shift_array_kernel<<<GRID_SIZE,BLOCK_SIZE>>>(d_out, d_sum_scan, NUM_ELEMS); } // Sort values using radix sort // EDIT: sort by first array in h_to_be_sorted unsigned int** radix_sort(unsigned int** h_to_be_sorted, const size_t NUM_ARRAYS_TO_SORT, const size_t NUM_ELEMS) { const int BLOCK_SIZE = 1024; const int GRID_SIZE = NUM_ELEMS / BLOCK_SIZE + 1; const unsigned int ARRAY_BYTES = sizeof(unsigned int) * NUM_ELEMS; const unsigned int BITS_PER_BYTE = 8; // host memory unsigned int** h_output = new unsigned int*[NUM_ARRAYS_TO_SORT]; unsigned int* h_out_coarse = new unsigned int[NUM_ELEMS]; unsigned int* h_out_bin = new unsigned int[NUM_ELEMS]; unsigned int* h_out_val = new unsigned int[NUM_ELEMS]; // device memory unsigned int *d_in_bin, *d_in_val, *d_sort_by, *d_map_coarse, *d_map_val, *d_map_bin, *d_predicate, *d_sum_scan, *d_predicate_tmp, *d_sum_scan_0, *d_sum_scan_1, *d_predicate_toggle, *d_reduce; checkCudaErrors(cudaMalloc((void **) &d_sort_by, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_in_bin, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_in_val, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_map_coarse, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_map_val, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_map_bin, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_predicate, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_predicate_tmp, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_predicate_toggle, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_sum_scan, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_sum_scan_0, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_sum_scan_1, ARRAY_BYTES)); checkCudaErrors(cudaMalloc((void **) &d_reduce, sizeof(unsigned int))); // copy host array to device checkCudaErrors(cudaMemcpy(d_sort_by, h_to_be_sorted[0], ARRAY_BYTES, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_in_bin, h_to_be_sorted[1], ARRAY_BYTES, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_in_val, h_to_be_sorted[2], ARRAY_BYTES, cudaMemcpyHostToDevice)); for (unsigned int i = 0; i < (BITS_PER_BYTE * sizeof(unsigned int)); i++) { // predicate is that LSB is 0 predicate_kernel<<<GRID_SIZE,BLOCK_SIZE>>>(d_predicate, d_sort_by, NUM_ELEMS, i); // calculate scatter addresses from predicates exclusive_sum_scan(d_sum_scan_0, d_predicate, d_predicate_tmp, d_sum_scan, ARRAY_BYTES, NUM_ELEMS, GRID_SIZE, BLOCK_SIZE); // copy contents of predicate, so we do not change its content checkCudaErrors(cudaMemcpy(d_predicate_tmp, d_predicate, ARRAY_BYTES, cudaMemcpyDeviceToDevice)); 
// calculate how many elements had predicate equal to 1 reduce_wrapper(d_reduce, d_predicate_tmp, NUM_ELEMS, BLOCK_SIZE); // toggle predicate values, so we can compute scatter addresses for toggled predicates toggle_predicate_kernel<<<GRID_SIZE, BLOCK_SIZE>>>(d_predicate_toggle, d_predicate, NUM_ELEMS); // so we now have addresses for elements where LSB is equal to 1 exclusive_sum_scan(d_sum_scan_1, d_predicate_toggle, d_predicate_tmp, d_sum_scan, ARRAY_BYTES, NUM_ELEMS, GRID_SIZE, BLOCK_SIZE); // shift scatter addresses according to amount of elements that had LSB equal to 0 add_splitter_map_kernel<<<GRID_SIZE, BLOCK_SIZE>>>(d_sum_scan_1, d_reduce, NUM_ELEMS); // move elements accordingly map_kernel<<<GRID_SIZE,BLOCK_SIZE>>>(d_map_coarse, d_map_bin, d_map_val, d_sort_by, d_in_bin, d_in_val, d_predicate, d_sum_scan_0, d_sum_scan_1, NUM_ELEMS); // swap pointers, instead of moving elements std::swap(d_sort_by, d_map_coarse); std::swap(d_in_bin, d_map_bin); std::swap(d_in_val, d_map_val); } // copy contents back; after the final swap the sorted data lives in d_sort_by, d_in_bin and d_in_val checkCudaErrors(cudaMemcpy(h_out_coarse, d_sort_by, ARRAY_BYTES, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_out_bin, d_in_bin, ARRAY_BYTES, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_out_val, d_in_val, ARRAY_BYTES, cudaMemcpyDeviceToHost)); h_output[0] = h_out_coarse; h_output[1] = h_out_bin; h_output[2] = h_out_val; return h_output; }
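Each iteration of the loop above performs one stable split by the inspected bit: a predicate marks keys whose bit is 0, its exclusive scan gives their destinations, the reduction counts them, and the toggled predicate plus the splitter offset gives destinations for keys whose bit is 1. A compact host-side sketch of that single pass (illustrative names, not taken from the sources):

#include <cstddef>
#include <vector>

// One radix pass: stable split of keys (and, via the same addresses, any payload
// arrays) according to the bit at position `bit`.
static void split_by_bit(std::vector<unsigned int>& keys, unsigned int bit)
{
    const std::size_t n = keys.size();
    std::vector<unsigned int> predicate(n), addr0(n), addr1(n), out(n);

    for (std::size_t i = 0; i < n; ++i)                 // predicate_kernel: 1 when the bit is 0
        predicate[i] = (((keys[i] >> bit) & 1u) == 0u) ? 1u : 0u;

    unsigned int zeros = 0;                             // exclusive_sum_scan + reduce_wrapper
    for (std::size_t i = 0; i < n; ++i) { addr0[i] = zeros; zeros += predicate[i]; }

    unsigned int ones = 0;                              // toggled scan + add_splitter_map_kernel
    for (std::size_t i = 0; i < n; ++i) { addr1[i] = zeros + ones; ones += 1u - predicate[i]; }

    for (std::size_t i = 0; i < n; ++i)                 // map_kernel scatter
        out[predicate[i] ? addr0[i] : addr1[i]] = keys[i];

    keys.swap(out);                                     // std::swap of the device pointers
}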
747502d99414d704e57a83e78cf6b8519a561a88.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_opg_utils.h" #include <cuml/common/logger.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <cumlprims/opg/linalg/gemm.hpp> #include <cumlprims/opg/matrix/matrix_utils.hpp> #include <gtest/gtest.h> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <test_utils.h> #include <raft/comms/mpi_comms.hpp> namespace MLCommon { namespace Test { namespace opg { struct PCAOpgParams { int M; int N; int N_components; ML::mg_solver algorithm; std::vector<int> partSizes; std::vector<int> ranksOwners; Matrix::Layout layout; unsigned long long int seed; }; template <typename T> class PCAOpgTest : public testing::TestWithParam<PCAOpgParams> { public: void SetUp() { params = GetParam(); raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); // Prepare resource const raft::comms::comms_t& comm = handle.get_comms(); stream = handle.get_stream(); hipblasHandle_t cublasHandle = handle.get_cublas_handle(); myRank = comm.get_rank(); totalRanks = comm.get_size(); raft::random::Rng r(params.seed + myRank); RAFT_CUBLAS_TRY(hipblasSetStream(cublasHandle, stream)); if (myRank == 0) { std::cout << "Testing PCA of " << params.M << " x " << params.N << " matrix" << std::endl; } // Prepare X matrix std::vector<Matrix::RankSizePair*> totalPartsToRanks; for (int i = 0; i < params.partSizes.size(); i++) { Matrix::RankSizePair* rspt = new Matrix::RankSizePair(params.ranksOwners[i] % totalRanks, params.partSizes[i]); totalPartsToRanks.push_back(rspt); } Matrix::PartDescriptor desc( params.M, params.N, totalPartsToRanks, comm.get_rank(), params.layout); std::vector<Matrix::Data<T>*> inParts; Matrix::opg::allocate(handle, inParts, desc, myRank, stream); Matrix::opg::randomize(handle, r, inParts, desc, myRank, stream, T(10.0), T(20.0)); handle.sync_stream(); prmsPCA.n_rows = params.M; prmsPCA.n_cols = params.N; prmsPCA.n_components = params.N_components; prmsPCA.whiten = false; prmsPCA.n_iterations = 100; prmsPCA.tol = 0.01; prmsPCA.algorithm = params.algorithm; rmm::device_uvector<T> components(prmsPCA.n_components * prmsPCA.n_cols, stream); rmm::device_uvector<T> explained_var(prmsPCA.n_components, stream); rmm::device_uvector<T> explained_var_ratio(prmsPCA.n_components, stream); rmm::device_uvector<T> singular_vals(prmsPCA.n_components, stream); rmm::device_uvector<T> mu(prmsPCA.n_cols, stream); rmm::device_uvector<T> noise_vars(prmsPCA.n_components, stream); ML::PCA::opg::fit(handle, inParts, desc, components.data(), explained_var.data(), explained_var_ratio.data(), singular_vals.data(), mu.data(), noise_vars.data(), prmsPCA, false); CUML_LOG_DEBUG( raft::arr2Str(singular_vals.data(), params.N_components, "Singular Vals", stream).c_str()); CUML_LOG_DEBUG( raft::arr2Str(explained_var.data(), params.N_components, "Explained Variance", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str( explained_var_ratio.data(), params.N_components, 
"Explained Variance Ratio", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str(components.data(), params.N_components * params.N, "Components", stream) .c_str()); Matrix::opg::deallocate(handle, inParts, desc, myRank, stream); } protected: PCAOpgParams params; raft::handle_t handle; hipStream_t stream = 0; int myRank; int totalRanks; ML::paramsPCAMG prmsPCA; }; const std::vector<PCAOpgParams> inputs = { {20, 4, 2, ML::mg_solver::COV_EIG_JACOBI, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::COV_EIG_DQ, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::QR, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}}; typedef PCAOpgTest<float> PCAOpgTestF; TEST_P(PCAOpgTestF, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestF, ::testing::ValuesIn(inputs)); typedef PCAOpgTest<double> PCAOpgTestD; TEST_P(PCAOpgTestD, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestD, ::testing::ValuesIn(inputs)); } // end namespace opg } // end namespace Test } // end namespace MLCommon
747502d99414d704e57a83e78cf6b8519a561a88.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_opg_utils.h" #include <cuml/common/logger.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <cumlprims/opg/linalg/gemm.hpp> #include <cumlprims/opg/matrix/matrix_utils.hpp> #include <gtest/gtest.h> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <test_utils.h> #include <raft/comms/mpi_comms.hpp> namespace MLCommon { namespace Test { namespace opg { struct PCAOpgParams { int M; int N; int N_components; ML::mg_solver algorithm; std::vector<int> partSizes; std::vector<int> ranksOwners; Matrix::Layout layout; unsigned long long int seed; }; template <typename T> class PCAOpgTest : public testing::TestWithParam<PCAOpgParams> { public: void SetUp() { params = GetParam(); raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); // Prepare resource const raft::comms::comms_t& comm = handle.get_comms(); stream = handle.get_stream(); cublasHandle_t cublasHandle = handle.get_cublas_handle(); myRank = comm.get_rank(); totalRanks = comm.get_size(); raft::random::Rng r(params.seed + myRank); RAFT_CUBLAS_TRY(cublasSetStream(cublasHandle, stream)); if (myRank == 0) { std::cout << "Testing PCA of " << params.M << " x " << params.N << " matrix" << std::endl; } // Prepare X matrix std::vector<Matrix::RankSizePair*> totalPartsToRanks; for (int i = 0; i < params.partSizes.size(); i++) { Matrix::RankSizePair* rspt = new Matrix::RankSizePair(params.ranksOwners[i] % totalRanks, params.partSizes[i]); totalPartsToRanks.push_back(rspt); } Matrix::PartDescriptor desc( params.M, params.N, totalPartsToRanks, comm.get_rank(), params.layout); std::vector<Matrix::Data<T>*> inParts; Matrix::opg::allocate(handle, inParts, desc, myRank, stream); Matrix::opg::randomize(handle, r, inParts, desc, myRank, stream, T(10.0), T(20.0)); handle.sync_stream(); prmsPCA.n_rows = params.M; prmsPCA.n_cols = params.N; prmsPCA.n_components = params.N_components; prmsPCA.whiten = false; prmsPCA.n_iterations = 100; prmsPCA.tol = 0.01; prmsPCA.algorithm = params.algorithm; rmm::device_uvector<T> components(prmsPCA.n_components * prmsPCA.n_cols, stream); rmm::device_uvector<T> explained_var(prmsPCA.n_components, stream); rmm::device_uvector<T> explained_var_ratio(prmsPCA.n_components, stream); rmm::device_uvector<T> singular_vals(prmsPCA.n_components, stream); rmm::device_uvector<T> mu(prmsPCA.n_cols, stream); rmm::device_uvector<T> noise_vars(prmsPCA.n_components, stream); ML::PCA::opg::fit(handle, inParts, desc, components.data(), explained_var.data(), explained_var_ratio.data(), singular_vals.data(), mu.data(), noise_vars.data(), prmsPCA, false); CUML_LOG_DEBUG( raft::arr2Str(singular_vals.data(), params.N_components, "Singular Vals", stream).c_str()); CUML_LOG_DEBUG( raft::arr2Str(explained_var.data(), params.N_components, "Explained Variance", stream) .c_str()); CUML_LOG_DEBUG( raft::arr2Str( explained_var_ratio.data(), params.N_components, "Explained Variance Ratio", stream) .c_str()); 
CUML_LOG_DEBUG( raft::arr2Str(components.data(), params.N_components * params.N, "Components", stream) .c_str()); Matrix::opg::deallocate(handle, inParts, desc, myRank, stream); } protected: PCAOpgParams params; raft::handle_t handle; cudaStream_t stream = 0; int myRank; int totalRanks; ML::paramsPCAMG prmsPCA; }; const std::vector<PCAOpgParams> inputs = { {20, 4, 2, ML::mg_solver::COV_EIG_JACOBI, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::COV_EIG_DQ, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}, {20, 4, 2, ML::mg_solver::QR, {11, 9}, {1, 0}, Matrix::LayoutColMajor, 223548ULL}}; typedef PCAOpgTest<float> PCAOpgTestF; TEST_P(PCAOpgTestF, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestF, ::testing::ValuesIn(inputs)); typedef PCAOpgTest<double> PCAOpgTestD; TEST_P(PCAOpgTestD, Result) { if (myRank == 0) { // We should be inverse transforming and checking against the original // data here. Github reference: https://github.com/rapidsai/cuml/issues/2474 ASSERT_TRUE(true); } } INSTANTIATE_TEST_CASE_P(PCAOpgTest, PCAOpgTestD, ::testing::ValuesIn(inputs)); } // end namespace opg } // end namespace Test } // end namespace MLCommon
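Beyond kernel launches, this pair also shows how hipify renames the BLAS handle plumbing (cublasHandle_t to hipblasHandle_t, cublasSetStream to hipblasSetStream, cudaStream_t to hipStream_t) while leaving the RAFT and RMM calls untouched. A standalone sketch of that translated setup, not taken from the test above; the hipBLAS header path can differ between ROCm releases:

#include <hip/hip_runtime.h>
#include <hipblas/hipblas.h>   // older ROCm releases use <hipblas.h>

int main()
{
    hipStream_t stream;                 // was: cudaStream_t
    hipStreamCreate(&stream);           // was: cudaStreamCreate
    hipblasHandle_t handle;             // was: cublasHandle_t
    hipblasCreate(&handle);             // was: cublasCreate
    hipblasSetStream(handle, stream);   // was: cublasSetStream
    hipblasDestroy(handle);             // was: cublasDestroy
    hipStreamDestroy(stream);           // was: cudaStreamDestroy
    return 0;
}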
1a8ff68ade9b32f10055020122b13033cf06fe67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "decoder_masked_multihead_attention_template.hpp" #include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h" #include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h" #include "src/fastertransformer/utils/cuda_bf16_wrapper.h" #include <assert.h> #include <float.h> #include <type_traits> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MMHA_LAUNCH_KERNEL( \ T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \ size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \ dim3 grid(params.num_heads, params.batch_size); \ hipLaunchKernelGGL(( mmha::masked_multihead_attention_kernel<T, \ Dh, \ Dh_MAX, \ THDS_PER_KEY, \ THDS_PER_VALUE, \ THDS_PER_BLOCK, \ DO_CROSS_ATTENTION, \ HAS_BEAMS>), dim3(grid), dim3(THDS_PER_BLOCK), smem_sz, stream, params) //////////////////////////////////////////////////////////////////////////////////////////////////// // !!! Specialize the launcher for Cross attention template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE> void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const hipStream_t& stream) { constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value; constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value; int tlength = (DO_CROSS_ATTENTION) ? 
params.memory_max_len : params.timestep; if (params.cache_indir == nullptr) { if (tlength < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream); } else if (tlength < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream); } } else { if (tlength < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream); } else if (tlength < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template void mmha_launch_kernel<float, 80, 128, Masked_multihead_attention_params<float>>( const Masked_multihead_attention_params<float>& params, const hipStream_t& stream); template void mmha_launch_kernel<uint16_t, 80, 128, Masked_multihead_attention_params<uint16_t>>( const Masked_multihead_attention_params<uint16_t>& params, const hipStream_t& stream); #ifdef ENABLE_BF16 template void mmha_launch_kernel<__nv_bfloat16, 80, 128, Masked_multihead_attention_params<__nv_bfloat16>>( const Masked_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream); #endif #ifdef ENABLE_FP8 template void mmha_launch_kernel<__nv_fp8_e4m3, 80, 128, Masked_multihead_attention_params<__nv_fp8_e4m3>>( const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream); #endif template void mmha_launch_kernel<float, 80, 128, Cross_multihead_attention_params<float>>( const Cross_multihead_attention_params<float>& params, const hipStream_t& stream); template void mmha_launch_kernel<uint16_t, 80, 128, Cross_multihead_attention_params<uint16_t>>( const Cross_multihead_attention_params<uint16_t>& params, const hipStream_t& stream); #ifdef ENABLE_BF16 template void mmha_launch_kernel<__nv_bfloat16, 80, 128, Cross_multihead_attention_params<__nv_bfloat16>>( const Cross_multihead_attention_params<__nv_bfloat16>& params, const hipStream_t& stream); #endif #ifdef ENABLE_FP8 template void mmha_launch_kernel<__nv_fp8_e4m3, 80, 128, Cross_multihead_attention_params<__nv_fp8_e4m3>>( const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const hipStream_t& stream); #endif #undef MMHA_LAUNCH_KERNEL
1a8ff68ade9b32f10055020122b13033cf06fe67.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "decoder_masked_multihead_attention_template.hpp" #include "src/fastertransformer/kernels/decoder_masked_multihead_attention.h" #include "src/fastertransformer/kernels/decoder_masked_multihead_attention_utils.h" #include "src/fastertransformer/utils/cuda_bf16_wrapper.h" #include <assert.h> #include <float.h> #include <type_traits> //////////////////////////////////////////////////////////////////////////////////////////////////// #define MMHA_LAUNCH_KERNEL( \ T, Dh, Dh_MAX, THDS_PER_KEY, THDS_PER_VALUE, THDS_PER_BLOCK, DO_CROSS_ATTENTION, HAS_BEAMS, stream) \ size_t smem_sz = mmha::smem_size_in_bytes<T, DO_CROSS_ATTENTION>(params, THDS_PER_VALUE, THDS_PER_BLOCK); \ dim3 grid(params.num_heads, params.batch_size); \ mmha::masked_multihead_attention_kernel<T, \ Dh, \ Dh_MAX, \ THDS_PER_KEY, \ THDS_PER_VALUE, \ THDS_PER_BLOCK, \ DO_CROSS_ATTENTION, \ HAS_BEAMS><<<grid, THDS_PER_BLOCK, smem_sz, stream>>>(params) //////////////////////////////////////////////////////////////////////////////////////////////////// // !!! Specialize the launcher for Cross attention template<typename T, int Dh, int Dh_MAX, typename KERNEL_PARAMS_TYPE> void mmha_launch_kernel(const KERNEL_PARAMS_TYPE& params, const cudaStream_t& stream) { constexpr int THREADS_PER_VALUE = threads_per_value_t<T, Dh_MAX>::value; constexpr bool DO_CROSS_ATTENTION = std::is_same<KERNEL_PARAMS_TYPE, Cross_multihead_attention_params<T>>::value; int tlength = (DO_CROSS_ATTENTION) ? 
params.memory_max_len : params.timestep; if (params.cache_indir == nullptr) { if (tlength < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, false, stream); } else if (tlength < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, false, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, false, stream); } } else { if (tlength < 32) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 4, THREADS_PER_VALUE, 64, DO_CROSS_ATTENTION, true, stream); } else if (tlength < 2048) { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 2, THREADS_PER_VALUE, 128, DO_CROSS_ATTENTION, true, stream); } else { MMHA_LAUNCH_KERNEL(T, Dh, Dh_MAX, 1, THREADS_PER_VALUE, 256, DO_CROSS_ATTENTION, true, stream); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template void mmha_launch_kernel<float, 80, 128, Masked_multihead_attention_params<float>>( const Masked_multihead_attention_params<float>& params, const cudaStream_t& stream); template void mmha_launch_kernel<uint16_t, 80, 128, Masked_multihead_attention_params<uint16_t>>( const Masked_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream); #ifdef ENABLE_BF16 template void mmha_launch_kernel<__nv_bfloat16, 80, 128, Masked_multihead_attention_params<__nv_bfloat16>>( const Masked_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream); #endif #ifdef ENABLE_FP8 template void mmha_launch_kernel<__nv_fp8_e4m3, 80, 128, Masked_multihead_attention_params<__nv_fp8_e4m3>>( const Masked_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream); #endif template void mmha_launch_kernel<float, 80, 128, Cross_multihead_attention_params<float>>( const Cross_multihead_attention_params<float>& params, const cudaStream_t& stream); template void mmha_launch_kernel<uint16_t, 80, 128, Cross_multihead_attention_params<uint16_t>>( const Cross_multihead_attention_params<uint16_t>& params, const cudaStream_t& stream); #ifdef ENABLE_BF16 template void mmha_launch_kernel<__nv_bfloat16, 80, 128, Cross_multihead_attention_params<__nv_bfloat16>>( const Cross_multihead_attention_params<__nv_bfloat16>& params, const cudaStream_t& stream); #endif #ifdef ENABLE_FP8 template void mmha_launch_kernel<__nv_fp8_e4m3, 80, 128, Cross_multihead_attention_params<__nv_fp8_e4m3>>( const Cross_multihead_attention_params<__nv_fp8_e4m3>& params, const cudaStream_t& stream); #endif #undef MMHA_LAUNCH_KERNEL
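The MMHA_LAUNCH_KERNEL macro in this pair is the clearest example of the launch-syntax rewrite: the CUDA triple-chevron launch becomes a hipLaunchKernelGGL call carrying the same grid, block, shared-memory and stream arguments, followed by the kernel arguments. A small self-contained sketch of the same rewrite (hypothetical kernel, not from the file above):

#include <hip/hip_runtime.h>

__global__ void scale(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    size_t smem = 0;
    // CUDA form:  scale<<<grid, block, smem, stream>>>(d_data, factor, n);
    // HIP form produced by hipify:
    hipLaunchKernelGGL(scale, grid, block, smem, stream, d_data, factor, n);
}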
4a427bb944e0556f56113e3ee875196b6dd5d570.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // #define UNIFIED #ifdef UNIFIED // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { uint index = blockIdx.x * blockDim.x + threadIdx.x; uint stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main() { int N = pow(2, 20); float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N * sizeof(float)); hipMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } int blockSize = 32; int numBlocks = 128; hipEvent_t startT, stopT; float time; hipEventCreate(&startT); hipEventCreate(&stopT); hipEventRecord(startT, 0); hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y); hipEventRecord(stopT, 0); hipEventSynchronize(stopT); hipEventElapsedTime(&time, startT, stopT); hipEventDestroy(startT); hipEventDestroy(stopT); std::cout << "cuda function :" << time << " ms" << std::endl; // Run kernel on 1M elements on the GPU // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; } #else __global__ void add(int n, const float *x, float *y) { uint index = blockIdx.x * blockDim.x + threadIdx.x; uint stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main() { const int N = pow(2, 20); float x[N]; float y[N]; // initialize x and y arrays on the host for (uint i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } float *d_x; float *d_y; hipError_t cudaStatus; // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void **)&d_x, N * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMemcpy(d_x, x, N * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMalloc((void **)&d_y, N * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMemcpy(d_y, y, N * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } int blockSize = 32; int numBlocks = 128; hipEvent_t startT, stopT; float time; hipEventCreate(&startT); hipEventCreate(&stopT); hipEventRecord(startT, 0); hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, d_x, d_y); hipEventRecord(stopT, 0); hipEventSynchronize(stopT); hipEventElapsedTime(&time, startT, stopT); hipEventDestroy(startT); hipEventDestroy(stopT); std::cout << "cuda function :" << time << " ms" << std::endl; // Run kernel on 1M elements on the GPU // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(y, d_y, N * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "max error: " << maxError << std::endl; // Free memory hipFree(d_x); hipFree(d_y); return 0; } #endif
4a427bb944e0556f56113e3ee875196b6dd5d570.cu
#include <iostream> #include <math.h> // #define UNIFIED #ifdef UNIFIED // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { uint index = blockIdx.x * blockDim.x + threadIdx.x; uint stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main() { int N = pow(2, 20); float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N * sizeof(float)); cudaMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } int blockSize = 32; int numBlocks = 128; cudaEvent_t startT, stopT; float time; cudaEventCreate(&startT); cudaEventCreate(&stopT); cudaEventRecord(startT, 0); add<<<numBlocks, blockSize>>>(N, x, y); cudaEventRecord(stopT, 0); cudaEventSynchronize(stopT); cudaEventElapsedTime(&time, startT, stopT); cudaEventDestroy(startT); cudaEventDestroy(stopT); std::cout << "cuda function :" << time << " ms" << std::endl; // Run kernel on 1M elements on the GPU // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; } #else __global__ void add(int n, const float *x, float *y) { uint index = blockIdx.x * blockDim.x + threadIdx.x; uint stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main() { const int N = pow(2, 20); float x[N]; float y[N]; // initialize x and y arrays on the host for (uint i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } float *d_x; float *d_y; cudaError_t cudaStatus; // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void **)&d_x, N * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMemcpy(d_x, x, N * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMalloc((void **)&d_y, N * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMemcpy(d_y, y, N * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } int blockSize = 32; int numBlocks = 128; cudaEvent_t startT, stopT; float time; cudaEventCreate(&startT); cudaEventCreate(&stopT); cudaEventRecord(startT, 0); add<<<numBlocks, blockSize>>>(N, d_x, d_y); cudaEventRecord(stopT, 0); cudaEventSynchronize(stopT); cudaEventElapsedTime(&time, startT, stopT); cudaEventDestroy(startT); cudaEventDestroy(stopT); std::cout << "cuda function :" << time << " ms" << std::endl; // Run kernel on 1M elements on the GPU // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(y, d_y, N * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "max error: " << maxError << std::endl; // Free memory cudaFree(d_x); cudaFree(d_y); return 0; } #endif
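The explicit-memory branch above checks every cudaMalloc and cudaMemcpy but not the kernel launch itself. A hedged sketch of how the launch could be checked in the HIP version (the HIP_CHECK macro is illustrative, not part of the example):

#include <hip/hip_runtime.h>
#include <cstdio>

#define HIP_CHECK(call)                                                     \
  do {                                                                      \
    hipError_t err_ = (call);                                               \
    if (err_ != hipSuccess)                                                 \
      fprintf(stderr, "%s failed: %s\n", #call, hipGetErrorString(err_));   \
  } while (0)

__global__ void add(int n, const float* x, float* y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (; i < n; i += stride) y[i] = x[i] + y[i];
}

void launch_add(int n, const float* d_x, float* d_y)
{
    hipLaunchKernelGGL(add, dim3(128), dim3(32), 0, 0, n, d_x, d_y);
    HIP_CHECK(hipGetLastError());       // configuration / launch errors
    HIP_CHECK(hipDeviceSynchronize());  // asynchronous execution errors
}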
91a38e0e217fade669e9a1a3eac8451f717540b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2021 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #include "cuda_score_updater.hpp" #ifdef USE_CUDA_EXP namespace LightGBM { __global__ void AddScoreConstantKernel( const double val, const data_size_t num_data, double* score) { const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x); if (data_index < num_data) { score[data_index] += val; } } void CUDAScoreUpdater::LaunchAddScoreConstantKernel(const double val, const size_t offset) { const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_; Log::Debug("Adding init score = %lf", val); hipLaunchKernelGGL(( AddScoreConstantKernel), dim3(num_blocks), dim3(num_threads_per_block_), 0, 0, val, num_data_, cuda_score_ + offset); } __global__ void MultiplyScoreConstantKernel( const double val, const data_size_t num_data, double* score) { const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x); if (data_index < num_data) { score[data_index] *= val; } } void CUDAScoreUpdater::LaunchMultiplyScoreConstantKernel(const double val, const size_t offset) { const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_; hipLaunchKernelGGL(( MultiplyScoreConstantKernel), dim3(num_blocks), dim3(num_threads_per_block_), 0, 0, val, num_data_, cuda_score_ + offset); } } // namespace LightGBM #endif // USE_CUDA_EXP
91a38e0e217fade669e9a1a3eac8451f717540b1.cu
/*! * Copyright (c) 2021 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #include "cuda_score_updater.hpp" #ifdef USE_CUDA_EXP namespace LightGBM { __global__ void AddScoreConstantKernel( const double val, const data_size_t num_data, double* score) { const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x); if (data_index < num_data) { score[data_index] += val; } } void CUDAScoreUpdater::LaunchAddScoreConstantKernel(const double val, const size_t offset) { const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_; Log::Debug("Adding init score = %lf", val); AddScoreConstantKernel<<<num_blocks, num_threads_per_block_>>>(val, num_data_, cuda_score_ + offset); } __global__ void MultiplyScoreConstantKernel( const double val, const data_size_t num_data, double* score) { const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x); if (data_index < num_data) { score[data_index] *= val; } } void CUDAScoreUpdater::LaunchMultiplyScoreConstantKernel(const double val, const size_t offset) { const int num_blocks = (num_data_ + num_threads_per_block_) / num_threads_per_block_; MultiplyScoreConstantKernel<<<num_blocks, num_threads_per_block_>>>(val, num_data_, cuda_score_ + offset); } } // namespace LightGBM #endif // USE_CUDA_EXP
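LaunchAddScoreConstantKernel and LaunchMultiplyScoreConstantKernel above size their grids as (num_data_ + num_threads_per_block_) / num_threads_per_block_, which rounds up but allocates one extra, idle block whenever num_data_ is an exact multiple of the block size; the in-kernel bounds check keeps that harmless. A small illustrative sketch (not LightGBM code) using the usual ceiling division:

#include <hip/hip_runtime.h>

__global__ void AddConstant(double val, int n, double* score)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) score[i] += val;   // bounds check: surplus threads do nothing
}

void LaunchAddConstant(double val, int n, double* d_score, int threads_per_block)
{
    const int num_blocks = (n + threads_per_block - 1) / threads_per_block;  // ceil(n / t)
    hipLaunchKernelGGL(AddConstant, dim3(num_blocks), dim3(threads_per_block), 0, 0,
                       val, n, d_score);
}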
e0ac7b501dbddca9ec29b130e9dc279522fb3e80.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include "helper_math.h" #include "FastDeviceMinMax.h" #include "Logger.h" #include "CUDAAssert.h" #include <cstdio> __device__ float4* BVHTreeNodes; __device__ float4* TriangleWoopCoordinates; __device__ int* MappingFromTriangleAddressToIndex; __device__ void print_float4(float4 printVal) { printf("%f, %f, %f, %f\n", printVal.x, printVal.y, printVal.z, printVal.w); } #ifdef MAGIC __device__ __noinline__ void __traceRay( Ray rayProperties, Hit* rayResultBuffer, const float4* startingNode, const float4* triNode ) { printf("Magic Function\n"); /* Ray struct definition origin_tmin = make_float4(o_x, o_y, o_z, t_min); dir_tmax = make_float4(d_x, d_y, d_z, t_max); */ float3 RayOrigin = make_float3(rayProperties.origin_tmin); float3 RayDirection = make_float3(rayProperties.dir_tmax); float tmin = rayProperties.origin_tmin.w; float tmax = rayProperties.dir_tmax.w; printf("tmin: %f\n", tmin); printf("tmax: %f\n", tmax); printf("ray origin: %f, %f, %f\n", RayOrigin.x, RayOrigin.y, RayOrigin.z); printf("ray direction: %f, %f, %f\n", RayDirection.x, RayDirection.y, RayDirection.z); printf("starting node address: 0x%x\n", startingNode); printf("triangle node address: 0x%x\n", triNode); float4 node = *startingNode; printf("node content: %f, %f, %f, %f\n", node.x, node.y, node.z, node.w); printf("result buffer address: 0x%x\n", rayResultBuffer); print_float4((*rayResultBuffer).t_triId_u_v); printf("anyhit: %d\n", rayProperties.anyhit); return; } #else __device__ inline bool RayBoxIntersection(float3 Low, float3 High, float3 InvDir, float3 Ood, float TMin, float TMax, float& OutIntersectionDist) { // ood = RayOrigin * idir; const float3 lo = Low * InvDir - Ood; // (Low - RayOrigin) / Direction const float3 hi = High * InvDir - Ood; const float slabMin = tMinFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMin); const float slabMax = tMaxFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMax); #ifdef DEBUG printf("low: %f, %f, %f\thigh: %f, %f, %f\n", Low.x, Low.y, Low.z, High.x, High.y, High.z); printf("lo: %f, %f, %f\thi: %f, %f, %f\n", lo.x, lo.y, lo.z, hi.x, hi.y, hi.z); printf("slabMin: %f\tslabMax: %f\n", slabMin, slabMax); #endif OutIntersectionDist = slabMin; return slabMin <= slabMax; } #endif __global__ void rtTraceBVH2Plain( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount, int* finishedRayCount, bool anyhit ) { int rayidx = blockIdx.x * blockDim.x + threadIdx.x; if (rayidx >= rayCount) return; const float4* localBVHTreeNodes = BVHTreeNodes; const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates; #ifdef MAGIC // Magic function to traverse BVH tree and test for hits, results stored into result buffer // localBVHTreeNodes is set by host in BVHManager.cpp > buildBVH2 // printf("Starting node %f\n", localBVHTreeNodes); // printf("Ray tmin %f\n", rayBuffer[rayidx].origin_tmin.w); rayBuffer[rayidx].anyhit = anyhit; __traceRay(rayBuffer[rayidx], &rayResultBuffer[rayidx], localBVHTreeNodes, localTriangleWoopCoordinates); #ifdef DEBUG printf("Traced result t: %f\n", rayResultBuffer[rayidx].t_triId_u_v.x); #endif #else // Setup traversal + initialisation const int EntrypointSentinel = 0x76543210; const int STACK_SIZE = 32; const float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number int traversalStack[STACK_SIZE]; traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 
0x76543210 (1985229328 in decimal) int* stackPtr = &traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel int nodeAddr = 0; // Start from the root. int hitAddr = -1; // No triangle intersected so far. int leafAddr = 0; float3 idir; // (1 / ray direction) float3 ood; float2 triangleuv; // Software Aila algorithm float3 RayOrigin = make_float3(rayBuffer[rayidx].origin_tmin); float3 RayDirection = make_float3(rayBuffer[rayidx].dir_tmax); float tmin = rayBuffer[rayidx].origin_tmin.w; float hitT = rayBuffer[rayidx].dir_tmax.w; // ooeps is very small number, used instead of raydir xyz component when that component is near zero idir.x = 1.0f / (fabsf(RayDirection.x) > ooeps ? RayDirection.x : copysignf(ooeps, RayDirection.x)); // inverse ray direction idir.y = 1.0f / (fabsf(RayDirection.y) > ooeps ? RayDirection.y : copysignf(ooeps, RayDirection.y)); // inverse ray direction idir.z = 1.0f / (fabsf(RayDirection.z) > ooeps ? RayDirection.z : copysignf(ooeps, RayDirection.z)); // inverse ray direction ood = RayOrigin * idir; #ifdef DEBUG printf("Ray origin: %f, %f, %f\n", RayOrigin.x, RayOrigin.y, RayOrigin.z); printf("Ray direction: %f, %f, %f\n", RayDirection.x, RayDirection.y, RayDirection.z); printf("Inverse direction: %f, %f, %f\n", idir.x, idir.y, idir.z); printf("OOD: %f, %f, %f\n", ood.x, ood.y, ood.z); #endif // Traversal loop. while (nodeAddr != EntrypointSentinel) { leafAddr = 0; while (nodeAddr != EntrypointSentinel && nodeAddr >= 0) { #ifdef DEBUG printf("\n"); #endif const float4 n0xy = __ldg(localBVHTreeNodes + nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) const float4 n1xy = __ldg(localBVHTreeNodes + nodeAddr + 1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y) const float4 n01z = __ldg(localBVHTreeNodes + nodeAddr + 2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) // Are child_index0 and child_index1 next to each other? float4 tmp = BVHTreeNodes[nodeAddr + 3]; // child_index0, child_index1 // Convert float4 into 2 float2s? int2 cnodes = *(int2*)&tmp; const float3 c0lo = make_float3(n0xy.x, n0xy.z, n01z.x); const float3 c0hi = make_float3(n0xy.y, n0xy.w, n01z.y); const float3 c1lo = make_float3(n1xy.x, n1xy.z, n01z.z); const float3 c1hi = make_float3(n1xy.y, n1xy.w, n01z.w); float c0dist, c1dist; // Ray box test on both child nodes bool traverseChild0 = RayBoxIntersection(c0lo, c0hi, idir, ood, tmin, hitT, c0dist); bool traverseChild1 = RayBoxIntersection(c1lo, c1hi, idir, ood, tmin, hitT, c1dist); #ifdef DEBUG printf("node addr: 0x%x\n", nodeAddr); print_float4(n0xy); print_float4(n1xy); print_float4(n01z); printf("cnodes: 0x%x, 0x%x\n", cnodes.x, cnodes.y); printf("C0hit: %d\t C1hit: %d\n", traverseChild0, traverseChild1); printf("C0dist: %f\t C1dist: %f\n", c0dist, c1dist); #endif // Check which child is closer? bool swp = c1dist < c0dist; // If both nodes miss, move to next node in stack if (!traverseChild0 && !traverseChild1) { nodeAddr = *stackPtr; stackPtr--; // printf("Both children miss; node addr: 0x%x\n", nodeAddr); } else { // If first child box hit, use child_index0? nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y; // printf("Traverse first child; node addr: 0x%x\n", nodeAddr); if (traverseChild0 && traverseChild1) { // If both boxes hit, check which one was closer and swap child_index0 and child_index1? if (swp) swap(nodeAddr, cnodes.y); // Push the farther node to the stack? stackPtr++; *stackPtr = cnodes.y; // printf("Both hit, traverse (closer) child; node addr: 0x%x\n", nodeAddr); } } // When is nodeAddr < 0? 
Are all leaf addresses negative? if (nodeAddr < 0 && leafAddr >= 0) { // Reached leaves. Pop next node from stack? leafAddr = nodeAddr; nodeAddr = *stackPtr; stackPtr--; // printf("Reached leaves; node addr: %x\n", nodeAddr); } if (!__any_sync(__activemask(), leafAddr >= 0)) break; } #ifdef DEBUG printf("Transition to leaves.\n"); #endif // Leaf intersections? while (leafAddr < 0) { for (int triAddr = ~leafAddr;; triAddr += 3) { #ifdef DEBUG printf("\nLeaf address: 0x%x\t", leafAddr); printf("Triangle address: 0x%x\n", triAddr); #endif // Get vertices? float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0); float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1); float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2); // End condition? if (__float_as_int(v00.x) == 0x80000000) { // printf("%d\n", (*(int*)&v00.x == 0x80000000)); break; } #ifdef DEBUG // printf("Triangle base: 0x%x, Triangle offset: 0x%x\n", localTriangleWoopCoordinates, triAddr); print_float4(v00); print_float4(v11); print_float4(v22); #endif // Multi-stage hit algorithm? float Oz = v00.w - RayOrigin.x * v00.x - RayOrigin.y * v00.y - RayOrigin.z * v00.z; float invDz = 1.0f / (RayDirection.x*v00.x + RayDirection.y*v00.y + RayDirection.z*v00.z); float t = Oz * invDz; #ifdef DEBUG printf("t parameter: %f \t", t); #endif if (t > tmin && t < hitT) { float Ox = v11.w + RayOrigin.x * v11.x + RayOrigin.y * v11.y + RayOrigin.z * v11.z; float Dx = RayDirection.x * v11.x + RayDirection.y * v11.y + RayDirection.z * v11.z; float u = Ox + t * Dx; #ifdef DEBUG printf("hit\n"); printf("u coordinate: %f \t", u); #endif if (u >= 0.0f && u <= 1.0f) { float Oy = v22.w + RayOrigin.x * v22.x + RayOrigin.y * v22.y + RayOrigin.z * v22.z; float Dy = RayDirection.x * v22.x + RayDirection.y * v22.y + RayDirection.z * v22.z; float v = Oy + t*Dy; #ifdef DEBUG printf("hit\n"); printf("v coordinate: %f \t", v); #endif if (v >= 0.0f && u + v <= 1.0f) { triangleuv.x = u; triangleuv.y = v; hitT = t; hitAddr = triAddr; #ifdef DEBUG printf("TRIANGLE HIT: 0x%x at %f\n", triAddr, hitT); #endif if (anyhit) { nodeAddr = EntrypointSentinel; break; } } } } #ifdef DEBUG if (hitT != t) printf("MISS\n"); #endif } // Get next node? leafAddr = nodeAddr; // If leaf node, pop next node from stack? if (nodeAddr < 0) { nodeAddr = *stackPtr; stackPtr--; } } } // QUESTION: What happens if nothing is hit? rayResultBuffer[rayidx].t_triId_u_v = make_float4( hitT, int_as_float(hitAddr), triangleuv.x, triangleuv.y ); #ifdef DEBUG printf("\nResult: \n"); printf("t: %f, u: %f, v: %f, triangle offset: 0x%x\n", hitT, triangleuv.x, triangleuv.y, hitAddr); #endif #endif } __host__ void rtBindBVH2Data( const float4* InBVHTreeNodes, const float4* InTriangleWoopCoordinates, const int* InMappingFromTriangleAddressToIndex) { cudaCheck(hipMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex))); cudaCheck(hipMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates))); // sizeof(InBVHTreeNodes) == 8, copy address from InBVHNodes to BVHTreeNodes? 
cudaCheck(hipMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes))); } __host__ void rtTraceBVH2( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount, bool anyhit ) { #ifdef ENABLE_PROFILING float elapsedTime; hipEvent_t startEvent, stopEvent; cudaCheck(hipEventCreate(&startEvent)); cudaCheck(hipEventCreate(&stopEvent)); #endif int* cudaFinishedRayCount; cudaCheck(hipMalloc(&cudaFinishedRayCount, sizeof(int))); hipMemset(cudaFinishedRayCount, 0, sizeof(int)); dim3 blockDim(128, 1); dim3 gridDim(idivCeil(rayCount, blockDim.x), 1); #ifdef ENABLE_PROFILING hipProfilerStart(); cudaCheck(hipEventRecord(startEvent, 0)); #endif Log("start Aila tracing\n"); hipLaunchKernelGGL(( rtTraceBVH2Plain) , dim3(gridDim), dim3(blockDim) , 0, 0, rayBuffer, rayResultBuffer, rayCount, cudaFinishedRayCount, anyhit ); #ifdef ENABLE_PROFILING cudaCheck(hipEventRecord(stopEvent, 0)); cudaCheck(hipEventSynchronize(stopEvent)); cudaCheck(hipEventElapsedTime(&elapsedTime, startEvent, stopEvent)); Log("%.3fMS, %.2lfMRays/s (rtTraceBVH2 No Dynamic Fetch)", elapsedTime, (double)rayCount / 1000000.0f / (elapsedTime / 1000.0f)); hipProfilerStop(); #endif hipFree(cudaFinishedRayCount); }
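/*
 * Illustrative sketch of the slab test that RayBoxIntersection performs above,
 * written with plain fminf/fmaxf instead of the tMinFermi/tMaxFermi helpers
 * from FastDeviceMinMax.h. Note that (bound - origin) * invDir is the same
 * quantity as the kernel's bound * InvDir - Ood, since Ood = origin * InvDir
 * is precomputed per ray. The name refSlabTest_example is a placeholder used
 * only for this sketch.
 */
__host__ __device__ inline bool refSlabTest_example(
    float3 lowBound, float3 highBound, float3 rayOrigin, float3 invDir,
    float tMin, float tMax, float& outDist)
{
    // Entry/exit distances to the two bounding planes on each axis.
    const float3 lo = make_float3((lowBound.x  - rayOrigin.x) * invDir.x,
                                  (lowBound.y  - rayOrigin.y) * invDir.y,
                                  (lowBound.z  - rayOrigin.z) * invDir.z);
    const float3 hi = make_float3((highBound.x - rayOrigin.x) * invDir.x,
                                  (highBound.y - rayOrigin.y) * invDir.y,
                                  (highBound.z - rayOrigin.z) * invDir.z);

    // Farthest entry vs. nearest exit, clamped to the ray's [tMin, tMax] interval.
    const float slabMin = fmaxf(fmaxf(fminf(lo.x, hi.x), fminf(lo.y, hi.y)),
                                fmaxf(fminf(lo.z, hi.z), tMin));
    const float slabMax = fminf(fminf(fmaxf(lo.x, hi.x), fmaxf(lo.y, hi.y)),
                                fminf(fmaxf(lo.z, hi.z), tMax));

    outDist = slabMin;             // same output convention as RayBoxIntersection
    return slabMin <= slabMax;     // the box is hit if the interval is non-empty
}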
e0ac7b501dbddca9ec29b130e9dc279522fb3e80.cu
#include <cuda_profiler_api.h> #include "helper_math.h" #include "FastDeviceMinMax.h" #include "Logger.h" #include "CUDAAssert.h" #include <cstdio> __device__ float4* BVHTreeNodes; __device__ float4* TriangleWoopCoordinates; __device__ int* MappingFromTriangleAddressToIndex; __device__ void print_float4(float4 printVal) { printf("%f, %f, %f, %f\n", printVal.x, printVal.y, printVal.z, printVal.w); } #ifdef MAGIC __device__ __noinline__ void __traceRay( Ray rayProperties, Hit* rayResultBuffer, const float4* startingNode, const float4* triNode ) { printf("Magic Function\n"); /* Ray struct definition origin_tmin = make_float4(o_x, o_y, o_z, t_min); dir_tmax = make_float4(d_x, d_y, d_z, t_max); */ float3 RayOrigin = make_float3(rayProperties.origin_tmin); float3 RayDirection = make_float3(rayProperties.dir_tmax); float tmin = rayProperties.origin_tmin.w; float tmax = rayProperties.dir_tmax.w; printf("tmin: %f\n", tmin); printf("tmax: %f\n", tmax); printf("ray origin: %f, %f, %f\n", RayOrigin.x, RayOrigin.y, RayOrigin.z); printf("ray direction: %f, %f, %f\n", RayDirection.x, RayDirection.y, RayDirection.z); printf("starting node address: 0x%x\n", startingNode); printf("triangle node address: 0x%x\n", triNode); float4 node = *startingNode; printf("node content: %f, %f, %f, %f\n", node.x, node.y, node.z, node.w); printf("result buffer address: 0x%x\n", rayResultBuffer); print_float4((*rayResultBuffer).t_triId_u_v); printf("anyhit: %d\n", rayProperties.anyhit); return; } #else __device__ inline bool RayBoxIntersection(float3 Low, float3 High, float3 InvDir, float3 Ood, float TMin, float TMax, float& OutIntersectionDist) { // ood = RayOrigin * idir; const float3 lo = Low * InvDir - Ood; // (Low - RayOrigin) / Direction const float3 hi = High * InvDir - Ood; const float slabMin = tMinFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMin); const float slabMax = tMaxFermi(lo.x, hi.x, lo.y, hi.y, lo.z, hi.z, TMax); #ifdef DEBUG printf("low: %f, %f, %f\thigh: %f, %f, %f\n", Low.x, Low.y, Low.z, High.x, High.y, High.z); printf("lo: %f, %f, %f\thi: %f, %f, %f\n", lo.x, lo.y, lo.z, hi.x, hi.y, hi.z); printf("slabMin: %f\tslabMax: %f\n", slabMin, slabMax); #endif OutIntersectionDist = slabMin; return slabMin <= slabMax; } #endif __global__ void rtTraceBVH2Plain( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount, int* finishedRayCount, bool anyhit ) { int rayidx = blockIdx.x * blockDim.x + threadIdx.x; if (rayidx >= rayCount) return; const float4* localBVHTreeNodes = BVHTreeNodes; const float4* localTriangleWoopCoordinates = TriangleWoopCoordinates; #ifdef MAGIC // Magic function to traverse BVH tree and test for hits, results stored into result buffer // localBVHTreeNodes is set by host in BVHManager.cpp > buildBVH2 // printf("Starting node %f\n", localBVHTreeNodes); // printf("Ray tmin %f\n", rayBuffer[rayidx].origin_tmin.w); rayBuffer[rayidx].anyhit = anyhit; __traceRay(rayBuffer[rayidx], &rayResultBuffer[rayidx], localBVHTreeNodes, localTriangleWoopCoordinates); #ifdef DEBUG printf("Traced result t: %f\n", rayResultBuffer[rayidx].t_triId_u_v.x); #endif #else // Setup traversal + initialisation const int EntrypointSentinel = 0x76543210; const int STACK_SIZE = 32; const float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number int traversalStack[STACK_SIZE]; traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 
0x76543210 (1985229328 in decimal) int* stackPtr = &traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel int nodeAddr = 0; // Start from the root. int hitAddr = -1; // No triangle intersected so far. int leafAddr = 0; float3 idir; // (1 / ray direction) float3 ood; float2 triangleuv; // Software Aila algorithm float3 RayOrigin = make_float3(rayBuffer[rayidx].origin_tmin); float3 RayDirection = make_float3(rayBuffer[rayidx].dir_tmax); float tmin = rayBuffer[rayidx].origin_tmin.w; float hitT = rayBuffer[rayidx].dir_tmax.w; // ooeps is very small number, used instead of raydir xyz component when that component is near zero idir.x = 1.0f / (fabsf(RayDirection.x) > ooeps ? RayDirection.x : copysignf(ooeps, RayDirection.x)); // inverse ray direction idir.y = 1.0f / (fabsf(RayDirection.y) > ooeps ? RayDirection.y : copysignf(ooeps, RayDirection.y)); // inverse ray direction idir.z = 1.0f / (fabsf(RayDirection.z) > ooeps ? RayDirection.z : copysignf(ooeps, RayDirection.z)); // inverse ray direction ood = RayOrigin * idir; #ifdef DEBUG printf("Ray origin: %f, %f, %f\n", RayOrigin.x, RayOrigin.y, RayOrigin.z); printf("Ray direction: %f, %f, %f\n", RayDirection.x, RayDirection.y, RayDirection.z); printf("Inverse direction: %f, %f, %f\n", idir.x, idir.y, idir.z); printf("OOD: %f, %f, %f\n", ood.x, ood.y, ood.z); #endif // Traversal loop. while (nodeAddr != EntrypointSentinel) { leafAddr = 0; while (nodeAddr != EntrypointSentinel && nodeAddr >= 0) { #ifdef DEBUG printf("\n"); #endif const float4 n0xy = __ldg(localBVHTreeNodes + nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) const float4 n1xy = __ldg(localBVHTreeNodes + nodeAddr + 1); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y) const float4 n01z = __ldg(localBVHTreeNodes + nodeAddr + 2); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) // Are child_index0 and child_index1 next to each other? float4 tmp = BVHTreeNodes[nodeAddr + 3]; // child_index0, child_index1 // Convert float4 into 2 float2s? int2 cnodes = *(int2*)&tmp; const float3 c0lo = make_float3(n0xy.x, n0xy.z, n01z.x); const float3 c0hi = make_float3(n0xy.y, n0xy.w, n01z.y); const float3 c1lo = make_float3(n1xy.x, n1xy.z, n01z.z); const float3 c1hi = make_float3(n1xy.y, n1xy.w, n01z.w); float c0dist, c1dist; // Ray box test on both child nodes bool traverseChild0 = RayBoxIntersection(c0lo, c0hi, idir, ood, tmin, hitT, c0dist); bool traverseChild1 = RayBoxIntersection(c1lo, c1hi, idir, ood, tmin, hitT, c1dist); #ifdef DEBUG printf("node addr: 0x%x\n", nodeAddr); print_float4(n0xy); print_float4(n1xy); print_float4(n01z); printf("cnodes: 0x%x, 0x%x\n", cnodes.x, cnodes.y); printf("C0hit: %d\t C1hit: %d\n", traverseChild0, traverseChild1); printf("C0dist: %f\t C1dist: %f\n", c0dist, c1dist); #endif // Check which child is closer? bool swp = c1dist < c0dist; // If both nodes miss, move to next node in stack if (!traverseChild0 && !traverseChild1) { nodeAddr = *stackPtr; stackPtr--; // printf("Both children miss; node addr: 0x%x\n", nodeAddr); } else { // If first child box hit, use child_index0? nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y; // printf("Traverse first child; node addr: 0x%x\n", nodeAddr); if (traverseChild0 && traverseChild1) { // If both boxes hit, check which one was closer and swap child_index0 and child_index1? if (swp) swap(nodeAddr, cnodes.y); // Push the farther node to the stack? stackPtr++; *stackPtr = cnodes.y; // printf("Both hit, traverse (closer) child; node addr: 0x%x\n", nodeAddr); } } // When is nodeAddr < 0? 
Are all leaf addresses negative? if (nodeAddr < 0 && leafAddr >= 0) { // Reached leaves. Pop next node from stack? leafAddr = nodeAddr; nodeAddr = *stackPtr; stackPtr--; // printf("Reached leaves; node addr: %x\n", nodeAddr); } if (!__any_sync(__activemask(), leafAddr >= 0)) break; } #ifdef DEBUG printf("Transition to leaves.\n"); #endif // Leaf intersections? while (leafAddr < 0) { for (int triAddr = ~leafAddr;; triAddr += 3) { #ifdef DEBUG printf("\nLeaf address: 0x%x\t", leafAddr); printf("Triangle address: 0x%x\n", triAddr); #endif // Get vertices? float4 v00 = __ldg(localTriangleWoopCoordinates + triAddr + 0); float4 v11 = __ldg(localTriangleWoopCoordinates + triAddr + 1); float4 v22 = __ldg(localTriangleWoopCoordinates + triAddr + 2); // End condition? if (__float_as_int(v00.x) == 0x80000000) { // printf("%d\n", (*(int*)&v00.x == 0x80000000)); break; } #ifdef DEBUG // printf("Triangle base: 0x%x, Triangle offset: 0x%x\n", localTriangleWoopCoordinates, triAddr); print_float4(v00); print_float4(v11); print_float4(v22); #endif // Multi-stage hit algorithm? float Oz = v00.w - RayOrigin.x * v00.x - RayOrigin.y * v00.y - RayOrigin.z * v00.z; float invDz = 1.0f / (RayDirection.x*v00.x + RayDirection.y*v00.y + RayDirection.z*v00.z); float t = Oz * invDz; #ifdef DEBUG printf("t parameter: %f \t", t); #endif if (t > tmin && t < hitT) { float Ox = v11.w + RayOrigin.x * v11.x + RayOrigin.y * v11.y + RayOrigin.z * v11.z; float Dx = RayDirection.x * v11.x + RayDirection.y * v11.y + RayDirection.z * v11.z; float u = Ox + t * Dx; #ifdef DEBUG printf("hit\n"); printf("u coordinate: %f \t", u); #endif if (u >= 0.0f && u <= 1.0f) { float Oy = v22.w + RayOrigin.x * v22.x + RayOrigin.y * v22.y + RayOrigin.z * v22.z; float Dy = RayDirection.x * v22.x + RayDirection.y * v22.y + RayDirection.z * v22.z; float v = Oy + t*Dy; #ifdef DEBUG printf("hit\n"); printf("v coordinate: %f \t", v); #endif if (v >= 0.0f && u + v <= 1.0f) { triangleuv.x = u; triangleuv.y = v; hitT = t; hitAddr = triAddr; #ifdef DEBUG printf("TRIANGLE HIT: 0x%x at %f\n", triAddr, hitT); #endif if (anyhit) { nodeAddr = EntrypointSentinel; break; } } } } #ifdef DEBUG if (hitT != t) printf("MISS\n"); #endif } // Get next node? leafAddr = nodeAddr; // If leaf node, pop next node from stack? if (nodeAddr < 0) { nodeAddr = *stackPtr; stackPtr--; } } } // QUESTION: What happens if nothing is hit? rayResultBuffer[rayidx].t_triId_u_v = make_float4( hitT, int_as_float(hitAddr), triangleuv.x, triangleuv.y ); #ifdef DEBUG printf("\nResult: \n"); printf("t: %f, u: %f, v: %f, triangle offset: 0x%x\n", hitT, triangleuv.x, triangleuv.y, hitAddr); #endif #endif } __host__ void rtBindBVH2Data( const float4* InBVHTreeNodes, const float4* InTriangleWoopCoordinates, const int* InMappingFromTriangleAddressToIndex) { cudaCheck(cudaMemcpyToSymbol(MappingFromTriangleAddressToIndex, &InMappingFromTriangleAddressToIndex, 1 * sizeof(InMappingFromTriangleAddressToIndex))); cudaCheck(cudaMemcpyToSymbol(TriangleWoopCoordinates, &InTriangleWoopCoordinates, 1 * sizeof(InTriangleWoopCoordinates))); // sizeof(InBVHTreeNodes) == 8, copy address from InBVHNodes to BVHTreeNodes? 
cudaCheck(cudaMemcpyToSymbol(BVHTreeNodes, &InBVHTreeNodes, 1 * sizeof(InBVHTreeNodes))); } __host__ void rtTraceBVH2( Ray* rayBuffer, Hit* rayResultBuffer, int rayCount, bool anyhit ) { #ifdef ENABLE_PROFILING float elapsedTime; cudaEvent_t startEvent, stopEvent; cudaCheck(cudaEventCreate(&startEvent)); cudaCheck(cudaEventCreate(&stopEvent)); #endif int* cudaFinishedRayCount; cudaCheck(cudaMalloc(&cudaFinishedRayCount, sizeof(int))); cudaMemset(cudaFinishedRayCount, 0, sizeof(int)); dim3 blockDim(128, 1); dim3 gridDim(idivCeil(rayCount, blockDim.x), 1); #ifdef ENABLE_PROFILING cudaProfilerStart(); cudaCheck(cudaEventRecord(startEvent, 0)); #endif Log("start Aila tracing\n"); rtTraceBVH2Plain <<< gridDim, blockDim >>> ( rayBuffer, rayResultBuffer, rayCount, cudaFinishedRayCount, anyhit ); #ifdef ENABLE_PROFILING cudaCheck(cudaEventRecord(stopEvent, 0)); cudaCheck(cudaEventSynchronize(stopEvent)); cudaCheck(cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent)); Log("%.3fMS, %.2lfMRays/s (rtTraceBVH2 No Dynamic Fetch)", elapsedTime, (double)rayCount / 1000000.0f / (elapsedTime / 1000.0f)); cudaProfilerStop(); #endif cudaFree(cudaFinishedRayCount); }
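/*
 * Illustrative sketch of the pointer-binding idiom used by rtBindBVH2Data
 * above: the __device__ globals are raw pointers, so the host copies the
 * pointer value itself (sizeof(float4*) == 8 bytes on a 64-bit build) into the
 * symbol, not the buffer it points to. ExampleNodes_example and
 * bindExampleNodes_example are placeholder names for this sketch only.
 */
__device__ const float4* ExampleNodes_example;

static void bindExampleNodes_example(const float4* devNodes)
{
    // Copy the address held in devNodes into the device-side global so that
    // kernels can dereference ExampleNodes_example directly.
    cudaCheck(cudaMemcpyToSymbol(ExampleNodes_example, &devNodes, sizeof(devNodes)));
}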
8a4565ba3a06d1e469f741a2bb16348b2c4aeac1.hip
// !!! This is a file automatically generated by hipify!!! #include "IPsecAES_CBC_kernel_core.hh" #include "IPsecAES_CBC_kernel.hh" #include <hip/hip_runtime.h> #include "../../engines/cuda/utils.hh" #include <stdint.h> #include <assert.h> #include <stdio.h> /******************************************************************* AES CBC kernel ******************************************************************/ /* former prototype __global__ void AES_cbc_128_encrypt_kernel_SharedMem_cbc(const uint8_t *in_all, uint8_t *out_all, const uint32_t *pkt_offset, const uint8_t *keys, uint8_t *ivs, const unsigned int num_flows, uint8_t *checkbits = 0) */ __global__ void AES_cbc_128_encrypt_kernel_SharedMem_cbc( const uint8_t *in_all, uint8_t *out_all, size_t *input_size_arr, size_t *output_size_arr, int num_flows, uint8_t *checkbits, int *key_idxs, struct aes_sa_entry *key_array, uint8_t *ivs, const uint32_t *pkt_offset ) { __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= num_flows) return; /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= num_flows) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } /* make sure T boxes have been initialized. */ __syncthreads(); /* Locate data */ const uint8_t *in = pkt_offset[idx] + in_all; uint8_t *out = pkt_offset[idx] + out_all; /* int temp = key_idxs[idx]; assert(temp == key_array[temp].entry_idx); assert(key_array[temp].aes_key != NULL); */ const uint8_t *key = key_array[key_idxs[idx]].aes_key; uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs; /* Encrypt using cbc mode */ unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx]; const unsigned char *iv = ivec; while (len >= AES_BLOCK_SIZE) { *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1); AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ iv[n]; for(unsigned n = len; n < AES_BLOCK_SIZE; ++n) out[n] = iv[n]; AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; } *((uint4*)ivec) = *((uint4*)iv); __syncthreads(); if (threadIdx.x == 0 && checkbits != 0) *(checkbits + blockIdx.x) = 1; } __global__ void AES_cbc_128_decrypt_kernel_SharedMem_cbc(const uint8_t *in_all, uint8_t *out_all, uint8_t *keys, uint8_t *ivs, uint16_t *pkt_index, unsigned long block_count, uint8_t *checkbits = 0 ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ uint32_t shared_Td0[256]; __shared__ uint32_t shared_Td1[256]; __shared__ uint32_t shared_Td2[256]; __shared__ uint32_t shared_Td3[256]; __shared__ uint8_t shared_Td4[256]; __shared__ uint32_t shared_Rcon[10]; __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; /* computer the thread id */ /* initialize T 
boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; shared_Td0[index] = Td0_ConstMem[index]; shared_Td1[index] = Td1_ConstMem[index]; shared_Td2[index] = Td2_ConstMem[index]; shared_Td3[index] = Td3_ConstMem[index]; shared_Td4[index] = Td4_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } for (unsigned i = 0; i * blockDim.x < 10; i++) { int index = threadIdx.x + blockDim.x * i; if (index < 10) { shared_Rcon[index] = rcon[index]; } } __syncthreads(); if (idx >= block_count) return; /* Locate data */ const uint8_t *in = idx * AES_BLOCK_SIZE + in_all; uint8_t *out = idx * AES_BLOCK_SIZE + out_all; uint16_t packet_index = pkt_index[idx]; uint32_t rk[4]; rk[0] = *((uint32_t*)(keys + 16 * packet_index)); rk[1] = *((uint32_t*)(keys + 16 * packet_index + 4)); rk[2] = *((uint32_t*)(keys + 16 * packet_index + 8)); rk[3] = *((uint32_t*)(keys + 16 * packet_index + 12)); uint8_t *ivec = packet_index * AES_BLOCK_SIZE + ivs; /* Decrypt using cbc mode */ const unsigned char *iv; if (idx == 0 || pkt_index[idx] != pkt_index[idx-1]) iv = ivec; else iv = in - AES_BLOCK_SIZE; AES_128_decrypt_cbc(in, out, rk, shared_Td0, shared_Td1, shared_Td2, shared_Td3, shared_Td4, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); *((uint64_t*)out) = *((uint64_t*)out) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)out) + 1) ^ *(((uint64_t*)iv) + 1); __syncthreads(); if (threadIdx.x == 0 && checkbits != 0) *(checkbits + blockIdx.x) = 1; } /******************************************************************* AES ECB kernel ******************************************************************/ __global__ void AES_ecb_encrypt_kernel_cbc(const uint8_t *in_all, uint8_t *out_all, const uint8_t *keys, uint16_t *pkt_index, unsigned long block_count ) { __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; /* initialize T boxes, #threads in block should be larger than 256 */ for (unsigned i = 0; i * blockDim.x < 256; i++) { unsigned index = i * blockDim.x + threadIdx.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for (unsigned i = 0; i * blockDim.x < 10; i++) { unsigned index = threadIdx.x + blockDim.x * i; if (index < 10) { shared_Rcon[index] = rcon[index]; } } if (idx >= block_count) return; /* make sure T boxes have been initialized. */ __syncthreads(); /* Locate data */ const uint8_t *in = idx * AES_BLOCK_SIZE + in_all; uint8_t *out = idx * AES_BLOCK_SIZE + out_all; uint16_t pktIndex = pkt_index[idx]; const uint8_t *key = pktIndex * 16 + keys; AES_128_encrypt_cbc(in, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); } /************************************************************************** Exported C++ function wrapper function for CUDA kernel ***************************************************************************/ /* * Sangwook: Those wrapper functions are not used in NBA. 
void AES_cbc_128_decrypt_gpu(const uint8_t *in_d, uint8_t *out_d, uint8_t *keys_d, uint8_t *ivs_d, uint16_t *pkt_index_d, unsigned long block_count, uint8_t *checkbits_d, const unsigned int threads_per_blk, hipStream_t stream ) { unsigned int num_cuda_blks = (block_count+threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_cbc_128_decrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d); } else { AES_cbc_128_decrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d); } } void AES_cbc_128_encrypt_gpu(const uint8_t *in_d, uint8_t *out_d, const uint32_t *pkt_offset_d, const uint8_t *keys_d, uint8_t *ivs_d, const unsigned int num_flows, uint8_t *checkbits_d, const unsigned int threads_per_blk, hipStream_t stream) { unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_cbc_128_encrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d); } else { AES_cbc_128_encrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d); } } void AES_ecb_128_encrypt_gpu(const uint8_t *in_d, uint8_t *out_d, const uint8_t *keys_d, uint16_t *pkt_index_d, unsigned long block_count, const unsigned int threads_per_blk, hipStream_t stream) { unsigned int num_cuda_blks = (block_count + threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_ecb_encrypt_kernel_cbc<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, keys_d, pkt_index_d, block_count); } else { AES_ecb_encrypt_kernel_cbc<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, keys_d, pkt_index_d, block_count); } } */ /************************************************************************** Key Setup for Decryption ***************************************************************************/ void AES_decrypt_key_prepare_cbc(uint8_t *dec_key, const uint8_t *enc_key, unsigned int key_bits) { uint32_t rk_buf[60]; uint32_t *rk = rk_buf; int i = 0; uint32_t temp; rk[0] = GETU32_cbc(enc_key ); rk[1] = GETU32_cbc(enc_key + 4); rk[2] = GETU32_cbc(enc_key + 8); rk[3] = GETU32_cbc(enc_key + 12); if (key_bits == 128) { for (;;) { temp = rk[3]; rk[4] = rk[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[5] = rk[1] ^ rk[4]; rk[6] = rk[2] ^ rk[5]; rk[7] = rk[3] ^ rk[6]; if (++i == 10) { rk += 4; goto end; } rk += 4; } } rk[4] = GETU32_cbc(enc_key + 16); rk[5] = GETU32_cbc(enc_key + 20); if (key_bits == 192) { for (;;) { temp = rk[ 5]; rk[ 6] = rk[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[ 7] = rk[ 1] ^ rk[ 6]; rk[ 8] = rk[ 2] ^ rk[ 7]; rk[ 9] = rk[ 3] ^ rk[ 8]; if (++i == 8) { rk += 6; goto end; } rk[10] = rk[ 4] ^ rk[ 9]; rk[11] = rk[ 5] ^ rk[10]; rk += 6; } } rk[6] = GETU32_cbc(enc_key + 24); rk[7] = GETU32_cbc(enc_key + 28); if (key_bits == 256) { for (;;) { temp = rk[ 7]; rk[ 8] = rk[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[ 9] = rk[ 1] ^ rk[ 8]; rk[10] = rk[ 2] ^ rk[ 9]; rk[11] = rk[ 3] ^ 
rk[10]; if (++i == 7) { rk += 8; goto end; } temp = rk[11]; rk[12] = rk[ 4] ^ (Te4[(temp >> 24) ] & 0xff000000) ^ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(temp ) & 0xff] & 0x000000ff); rk[13] = rk[ 5] ^ rk[12]; rk[14] = rk[ 6] ^ rk[13]; rk[15] = rk[ 7] ^ rk[14]; rk += 8; } } end: memcpy(dec_key, rk, 16); } /************************************************************************** Experimental Codes ***************************************************************************/ __global__ void computeAES_CBC( uint8_t* input_buf, uint8_t *output_buf, size_t *input_size_arr, size_t *output_size_arr, int N, uint8_t *checkbits_d, const uint8_t* __restrict__ ivs, const int32_t* __restrict__ key_idxs, const struct aes_sa_entry* __restrict__ aes_key_array, const int32_t* __restrict__ offsets) { /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; int index = idx; if (idx < N) { /* Locate data */ const uint8_t *in = input_buf + offsets[idx]; uint8_t *out = output_buf + offsets[idx]; __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* int temp = key_idxs[idx]; assert(temp == key_array[temp].entry_idx); assert(key_array[temp].aes_key != NULL); */ const uint8_t *key = (const uint8_t*) aes_key_array[key_idxs[index]].aes_key; uint8_t *ivec = (uint8_t*) (idx * AES_BLOCK_SIZE + ivs); /* Encrypt using cbc mode */ unsigned long len = (unsigned long) input_size_arr[index]; const unsigned char *iv = ivec; while (len >= AES_BLOCK_SIZE) { *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1); AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ iv[n]; for(unsigned n = len; n < AES_BLOCK_SIZE; ++n) out[n] = iv[n]; AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; } *((uint4*)ivec) = *((uint4*)iv); } __syncthreads(); if (threadIdx.x == 0 && checkbits_d != 0) *(checkbits_d + blockIdx.x) = 1; } /* Among AES_cbc_128_decryption, AES_cbc_128_encryption, * AES_ecb_128_encryption and AES_decrypt_key_prepare_cbc(), * AES_cbc_128_encrypt_gpu() is only used in NBA, for now. */ void *nba::ipsec_aes_encryption_cbc_get_cuda_kernel() { return reinterpret_cast<void *> (computeAES_CBC); }
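/*
 * Host-side reference sketch of the CBC chaining that the encrypt kernels
 * above perform per flow: C[i] = E_K(P[i] XOR C[i-1]) with C[-1] = IV, and a
 * trailing partial block padded with IV bytes exactly as in the kernel's tail
 * handling. blockCipher_example stands in for AES_128_encrypt_cbc and
 * cbcEncryptRef_example is a placeholder; neither is part of the original code.
 */
#include <stdint.h>
#include <string.h>

#define EX_BLOCK 16  /* AES_BLOCK_SIZE */

typedef void (*blockCipher_example)(const uint8_t in[EX_BLOCK],
                                    uint8_t out[EX_BLOCK],
                                    const uint8_t *key);

static void cbcEncryptRef_example(const uint8_t *in, uint8_t *out, size_t len,
                                  const uint8_t *key, uint8_t iv[EX_BLOCK],
                                  blockCipher_example encrypt)
{
    const uint8_t *prev = iv;                       /* C[-1] = IV */
    while (len >= EX_BLOCK) {
        for (int n = 0; n < EX_BLOCK; ++n)
            out[n] = in[n] ^ prev[n];               /* P[i] XOR C[i-1] */
        encrypt(out, out, key);                     /* C[i] = E_K(...)  */
        prev = out;
        in += EX_BLOCK; out += EX_BLOCK; len -= EX_BLOCK;
    }
    if (len) {                                      /* trailing partial block */
        for (size_t n = 0; n < len; ++n)        out[n] = in[n] ^ prev[n];
        for (size_t n = len; n < EX_BLOCK; ++n) out[n] = prev[n];
        encrypt(out, out, key);
        prev = out;
    }
    memcpy(iv, prev, EX_BLOCK);  /* last ciphertext block is written back as the saved IV */
}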
8a4565ba3a06d1e469f741a2bb16348b2c4aeac1.cu
#include "IPsecAES_CBC_kernel_core.hh" #include "IPsecAES_CBC_kernel.hh" #include <cuda.h> #include "../../engines/cuda/utils.hh" #include <stdint.h> #include <assert.h> #include <stdio.h> /******************************************************************* AES CBC kernel ******************************************************************/ /* former prototype __global__ void AES_cbc_128_encrypt_kernel_SharedMem_cbc(const uint8_t *in_all, uint8_t *out_all, const uint32_t *pkt_offset, const uint8_t *keys, uint8_t *ivs, const unsigned int num_flows, uint8_t *checkbits = 0) */ __global__ void AES_cbc_128_encrypt_kernel_SharedMem_cbc( const uint8_t *in_all, uint8_t *out_all, size_t *input_size_arr, size_t *output_size_arr, int num_flows, uint8_t *checkbits, int *key_idxs, struct aes_sa_entry *key_array, uint8_t *ivs, const uint32_t *pkt_offset ) { __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= num_flows) return; /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= num_flows) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } /* make sure T boxes have been initialized. */ __syncthreads(); /* Locate data */ const uint8_t *in = pkt_offset[idx] + in_all; uint8_t *out = pkt_offset[idx] + out_all; /* int temp = key_idxs[idx]; assert(temp == key_array[temp].entry_idx); assert(key_array[temp].aes_key != NULL); */ const uint8_t *key = key_array[key_idxs[idx]].aes_key; uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs; /* Encrypt using cbc mode */ unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx]; const unsigned char *iv = ivec; while (len >= AES_BLOCK_SIZE) { *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1); AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ iv[n]; for(unsigned n = len; n < AES_BLOCK_SIZE; ++n) out[n] = iv[n]; AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; } *((uint4*)ivec) = *((uint4*)iv); __syncthreads(); if (threadIdx.x == 0 && checkbits != 0) *(checkbits + blockIdx.x) = 1; } __global__ void AES_cbc_128_decrypt_kernel_SharedMem_cbc(const uint8_t *in_all, uint8_t *out_all, uint8_t *keys, uint8_t *ivs, uint16_t *pkt_index, unsigned long block_count, uint8_t *checkbits = 0 ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ uint32_t shared_Td0[256]; __shared__ uint32_t shared_Td1[256]; __shared__ uint32_t shared_Td2[256]; __shared__ uint32_t shared_Td3[256]; __shared__ uint8_t shared_Td4[256]; __shared__ uint32_t shared_Rcon[10]; __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; /* computer the thread id */ /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned 
index = threadIdx.x + i * blockDim.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; shared_Td0[index] = Td0_ConstMem[index]; shared_Td1[index] = Td1_ConstMem[index]; shared_Td2[index] = Td2_ConstMem[index]; shared_Td3[index] = Td3_ConstMem[index]; shared_Td4[index] = Td4_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } for (unsigned i = 0; i * blockDim.x < 10; i++) { int index = threadIdx.x + blockDim.x * i; if (index < 10) { shared_Rcon[index] = rcon[index]; } } __syncthreads(); if (idx >= block_count) return; /* Locate data */ const uint8_t *in = idx * AES_BLOCK_SIZE + in_all; uint8_t *out = idx * AES_BLOCK_SIZE + out_all; uint16_t packet_index = pkt_index[idx]; uint32_t rk[4]; rk[0] = *((uint32_t*)(keys + 16 * packet_index)); rk[1] = *((uint32_t*)(keys + 16 * packet_index + 4)); rk[2] = *((uint32_t*)(keys + 16 * packet_index + 8)); rk[3] = *((uint32_t*)(keys + 16 * packet_index + 12)); uint8_t *ivec = packet_index * AES_BLOCK_SIZE + ivs; /* Decrypt using cbc mode */ const unsigned char *iv; if (idx == 0 || pkt_index[idx] != pkt_index[idx-1]) iv = ivec; else iv = in - AES_BLOCK_SIZE; AES_128_decrypt_cbc(in, out, rk, shared_Td0, shared_Td1, shared_Td2, shared_Td3, shared_Td4, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); *((uint64_t*)out) = *((uint64_t*)out) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)out) + 1) ^ *(((uint64_t*)iv) + 1); __syncthreads(); if (threadIdx.x == 0 && checkbits != 0) *(checkbits + blockIdx.x) = 1; } /******************************************************************* AES ECB kernel ******************************************************************/ __global__ void AES_ecb_encrypt_kernel_cbc(const uint8_t *in_all, uint8_t *out_all, const uint8_t *keys, uint16_t *pkt_index, unsigned long block_count ) { __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; /* initialize T boxes, #threads in block should be larger than 256 */ for (unsigned i = 0; i * blockDim.x < 256; i++) { unsigned index = i * blockDim.x + threadIdx.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for (unsigned i = 0; i * blockDim.x < 10; i++) { unsigned index = threadIdx.x + blockDim.x * i; if (index < 10) { shared_Rcon[index] = rcon[index]; } } if (idx >= block_count) return; /* make sure T boxes have been initialized. */ __syncthreads(); /* Locate data */ const uint8_t *in = idx * AES_BLOCK_SIZE + in_all; uint8_t *out = idx * AES_BLOCK_SIZE + out_all; uint16_t pktIndex = pkt_index[idx]; const uint8_t *key = pktIndex * 16 + keys; AES_128_encrypt_cbc(in, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); } /************************************************************************** Exported C++ function wrapper function for CUDA kernel ***************************************************************************/ /* * Sangwook: Those wrapper functions are not used in NBA. 
void AES_cbc_128_decrypt_gpu(const uint8_t *in_d, uint8_t *out_d, uint8_t *keys_d, uint8_t *ivs_d, uint16_t *pkt_index_d, unsigned long block_count, uint8_t *checkbits_d, const unsigned int threads_per_blk, cudaStream_t stream ) { unsigned int num_cuda_blks = (block_count+threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_cbc_128_decrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d); } else { AES_cbc_128_decrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d); } } void AES_cbc_128_encrypt_gpu(const uint8_t *in_d, uint8_t *out_d, const uint32_t *pkt_offset_d, const uint8_t *keys_d, uint8_t *ivs_d, const unsigned int num_flows, uint8_t *checkbits_d, const unsigned int threads_per_blk, cudaStream_t stream) { unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_cbc_128_encrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d); } else { AES_cbc_128_encrypt_kernel_SharedMem_cbc<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d); } } void AES_ecb_128_encrypt_gpu(const uint8_t *in_d, uint8_t *out_d, const uint8_t *keys_d, uint16_t *pkt_index_d, unsigned long block_count, const unsigned int threads_per_blk, cudaStream_t stream) { unsigned int num_cuda_blks = (block_count + threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_ecb_encrypt_kernel_cbc<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, keys_d, pkt_index_d, block_count); } else { AES_ecb_encrypt_kernel_cbc<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, keys_d, pkt_index_d, block_count); } } */ /************************************************************************** Key Setup for Decryption ***************************************************************************/ void AES_decrypt_key_prepare_cbc(uint8_t *dec_key, const uint8_t *enc_key, unsigned int key_bits) { uint32_t rk_buf[60]; uint32_t *rk = rk_buf; int i = 0; uint32_t temp; rk[0] = GETU32_cbc(enc_key ); rk[1] = GETU32_cbc(enc_key + 4); rk[2] = GETU32_cbc(enc_key + 8); rk[3] = GETU32_cbc(enc_key + 12); if (key_bits == 128) { for (;;) { temp = rk[3]; rk[4] = rk[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[5] = rk[1] ^ rk[4]; rk[6] = rk[2] ^ rk[5]; rk[7] = rk[3] ^ rk[6]; if (++i == 10) { rk += 4; goto end; } rk += 4; } } rk[4] = GETU32_cbc(enc_key + 16); rk[5] = GETU32_cbc(enc_key + 20); if (key_bits == 192) { for (;;) { temp = rk[ 5]; rk[ 6] = rk[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[ 7] = rk[ 1] ^ rk[ 6]; rk[ 8] = rk[ 2] ^ rk[ 7]; rk[ 9] = rk[ 3] ^ rk[ 8]; if (++i == 8) { rk += 6; goto end; } rk[10] = rk[ 4] ^ rk[ 9]; rk[11] = rk[ 5] ^ rk[10]; rk += 6; } } rk[6] = GETU32_cbc(enc_key + 24); rk[7] = GETU32_cbc(enc_key + 28); if (key_bits == 256) { for (;;) { temp = rk[ 7]; rk[ 8] = rk[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[ 9] = rk[ 1] ^ rk[ 8]; rk[10] = rk[ 2] ^ rk[ 9]; rk[11] = rk[ 3] ^ 
rk[10]; if (++i == 7) { rk += 8; goto end; } temp = rk[11]; rk[12] = rk[ 4] ^ (Te4[(temp >> 24) ] & 0xff000000) ^ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(temp ) & 0xff] & 0x000000ff); rk[13] = rk[ 5] ^ rk[12]; rk[14] = rk[ 6] ^ rk[13]; rk[15] = rk[ 7] ^ rk[14]; rk += 8; } } end: memcpy(dec_key, rk, 16); } /************************************************************************** Experimental Codes ***************************************************************************/ __global__ void computeAES_CBC( uint8_t* input_buf, uint8_t *output_buf, size_t *input_size_arr, size_t *output_size_arr, int N, uint8_t *checkbits_d, const uint8_t* __restrict__ ivs, const int32_t* __restrict__ key_idxs, const struct aes_sa_entry* __restrict__ aes_key_array, const int32_t* __restrict__ offsets) { /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; int index = idx; if (idx < N) { /* Locate data */ const uint8_t *in = input_buf + offsets[idx]; uint8_t *out = output_buf + offsets[idx]; __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* int temp = key_idxs[idx]; assert(temp == key_array[temp].entry_idx); assert(key_array[temp].aes_key != NULL); */ const uint8_t *key = (const uint8_t*) aes_key_array[key_idxs[index]].aes_key; uint8_t *ivec = (uint8_t*) (idx * AES_BLOCK_SIZE + ivs); /* Encrypt using cbc mode */ unsigned long len = (unsigned long) input_size_arr[index]; const unsigned char *iv = ivec; while (len >= AES_BLOCK_SIZE) { *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1); AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ iv[n]; for(unsigned n = len; n < AES_BLOCK_SIZE; ++n) out[n] = iv[n]; AES_128_encrypt_cbc(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; } *((uint4*)ivec) = *((uint4*)iv); } __syncthreads(); if (threadIdx.x == 0 && checkbits_d != 0) *(checkbits_d + blockIdx.x) = 1; } /* Among AES_cbc_128_decryption, AES_cbc_128_encryption, * AES_ecb_128_encryption and AES_decrypt_key_prepare_cbc(), * AES_cbc_128_encrypt_gpu() is only used in NBA, for now. */ void *nba::ipsec_aes_encryption_cbc_get_cuda_kernel() { return reinterpret_cast<void *> (computeAES_CBC); }
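/*
 * Illustrative launch sketch for computeAES_CBC above: one thread per packet,
 * rounded up to whole blocks with the same (num + threads_per_blk - 1) /
 * threads_per_blk arithmetic used by the commented-out wrappers. The
 * 256-thread block size, the *_d argument names and launchComputeAES_example
 * itself are assumptions of this sketch, not part of the original code.
 */
static inline unsigned int ceilDiv_example(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;   /* e.g. 1000 packets at 256 threads -> 4 blocks */
}

static void launchComputeAES_example(uint8_t *in_d, uint8_t *out_d,
                                     size_t *in_size_d, size_t *out_size_d,
                                     int numPackets, uint8_t *checkbits_d,
                                     const uint8_t *ivs_d,
                                     const int32_t *key_idxs_d,
                                     const struct aes_sa_entry *keys_d,
                                     const int32_t *offsets_d,
                                     cudaStream_t stream)
{
    const unsigned int threadsPerBlock = 256;  /* assumed block size */
    const unsigned int blocks = ceilDiv_example((unsigned int)numPackets, threadsPerBlock);
    computeAES_CBC<<<blocks, threadsPerBlock, 0, stream>>>(
        in_d, out_d, in_size_d, out_size_d, numPackets, checkbits_d,
        ivs_d, key_idxs_d, keys_d, offsets_d);
}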
46ff50c9ee5865fd180156ccfa6a9044f8ee6868.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/partition.h> #include <thrust/random.h> #include <thrust/remove.h> #include <glm/gtc/matrix_inverse.hpp> #include <glm/gtc/matrix_transform.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #include "macros.h" static float gpu_time_300_iter = 0.0f; #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static int materialSize = 0; static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static Geom * dev_lights = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; static PathSegment* dev_paths_cache = NULL; static PathSegment* dev_intersections_cache = NULL; static BoundingBox* dev_bounding_box = NULL; static std::vector<hipTextureObject_t> cudaTextures; static std::vector<hipArray*> cudaTextureData; static glm::vec2* dev_texDim = NULL; static hipTextureObject_t* dev_cudaTextures = NULL; static example::Material* dev_gltfMateiral = NULL; static Octree* dev_octree = NULL; static OctreeNode* dev_octreeNode = NULL; static int* dev_primsForOcts = NULL; static int* dev_meshTriForOcts = NULL; static int lightLen = 0; const static int depthSize = 0; hipTextureObject_t texTest; // Mesh Loading static float* dev_mesh_pos = NULL; static float* dev_mesh_nor = NULL; static int* dev_mesh_idx = NULL; static float* dev_mesh_uv = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; int sampleCount = pixelcount; materialSize = scene->materials.size(); #ifdef ANTIALIASING sampleCount *= AASAMPLENUM; #endif lightLen = scene->lights.size(); hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, sampleCount * sizeof(PathSegment)); hipMalloc(&dev_paths_cache, sampleCount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); hipMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_bounding_box, scene->boundingBoxes.size() * sizeof(BoundingBox)); hipMemcpy(dev_bounding_box, scene->boundingBoxes.data(), scene->boundingBoxes.size() * sizeof(BoundingBox), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, sampleCount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, sampleCount * sizeof(ShadeableIntersection)); hipMalloc(&dev_intersections_cache, sampleCount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections_cache, 0, sampleCount * sizeof(ShadeableIntersection)); hipMalloc(&dev_gltfMateiral, scene->gltfMaterials.size() * sizeof(example::Material)); hipMemcpy(dev_gltfMateiral, scene->gltfMaterials.data(), scene->gltfMaterials.size() * sizeof(example::Material), hipMemcpyHostToDevice); hipMalloc(&dev_octree, sizeof(scene->octree)); hipMemcpy(dev_octree, &scene->octree, sizeof(scene->octree), hipMemcpyHostToDevice); hipMalloc(&dev_octreeNode, sizeof(OctreeNode) * scene->octree.nodeData.size()); hipMemcpy(dev_octreeNode, scene->octree.nodeData.data(), sizeof(OctreeNode) * scene->octree.nodeData.size(), hipMemcpyHostToDevice); // TODO: initialize any extra device memeory you need hipMalloc(&dev_mesh_idx, scene->faceCount * 3 * sizeof(int)); hipMalloc(&dev_mesh_pos, scene->posCount * sizeof(float)); hipMalloc(&dev_mesh_nor, scene->faceCount * 3 * 3 * sizeof(float)); hipMalloc(&dev_mesh_uv, scene->faceCount * 2 * 3 * sizeof(float)); int curOffset = 0; int curPosOffset = 0; for (int i = 0; i < scene->meshes.size(); i++) { for (int j = 0; j < scene->meshes.at(i).size(); j++) { int stride = scene->meshes.at(i).at(j).faces.size() / (scene->meshes.at(i).at(j).stride / sizeof(float)); int curPosNum = scene->meshes.at(i).at(j).vertices.size(); int final = scene->meshes.at(i).at(j).faces.at(stride * 3 - 1); hipMemcpy(dev_mesh_idx + curOffset * 3, scene->meshes.at(i).at(j).faces.data(), stride * 3 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_mesh_pos + curPosOffset, scene->meshes.at(i).at(j).vertices.data(), curPosNum * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_mesh_nor + curOffset * 3 * 3, scene->meshes.at(i).at(j).facevarying_normals.data(), stride * 3 * 3 * sizeof(float), hipMemcpyHostToDevice); //Load UV hipMemcpy(dev_mesh_uv + curOffset * 2 * 3, scene->meshes.at(i).at(j).facevarying_uvs.data(), stride * 2 * 3 * sizeof(float), hipMemcpyHostToDevice); curOffset += stride; curPosOffset += curPosNum; } } // Load Prim and MeshTri Oct Data hipMalloc(&dev_primsForOcts, 
scene->octree.primitiveCount * sizeof(int)); hipMalloc(&dev_meshTriForOcts, scene->octree.meshTriCount * sizeof(int)); int curPrimOffset = 0; int curMeshTriOffset = 0; for (int i = 0; i < scene->octree.nodeData.size(); i++) { hipMemcpy(dev_primsForOcts + curPrimOffset, scene->octree.nodeData.at(i).primitiveIndices.data(), sizeof(int) * scene->octree.nodeData.at(i).primitiveCount, hipMemcpyHostToDevice); hipMemcpy(dev_meshTriForOcts + curMeshTriOffset, scene->octree.nodeData.at(i).meshTriangleIndices.data(), sizeof(int) * scene->octree.nodeData.at(i).meshTriCount, hipMemcpyHostToDevice); curPrimOffset += scene->octree.nodeData.at(i).primitiveCount; curMeshTriOffset += scene->octree.nodeData.at(i).meshTriCount; } // Load Textures int count = 0; std::vector<glm::vec2> texDim; for (const auto &tex : scene->gltfTextures) { float4* texTmp = new float4[tex.height * tex.width]; for (int i = 0; i < tex.height * tex.width; i++) { texTmp[i].x = (float)tex.image[4 * i]; texTmp[i].y = (float)tex.image[4 * i + 1]; texTmp[i].z = (float)tex.image[4 * i + 2]; texTmp[i].w = (float)tex.image[4 * i + 3]; } hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>(); // Load Data hipArray* cuArray; hipMallocArray(&cuArray, &channelDesc, tex.width, tex.height); hipMemcpyToArray(cuArray, 0, 0, texTmp, tex.height * tex.width * sizeof(float4), hipMemcpyHostToDevice); // Specify Texture struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = cuArray; delete []texTmp; // Specify texture object parameters struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); switch (tex.sampler.wrapS) { case CLAMP_TO_EDGE: texDesc.addressMode[0] = hipAddressModeClamp; break; case MIRRORED_REPEAT: texDesc.addressMode[0] = hipAddressModeMirror; break; case REPEAT: texDesc.addressMode[0] = hipAddressModeWrap; break; } switch (tex.sampler.wrapT) { case CLAMP_TO_EDGE: texDesc.addressMode[1] = hipAddressModeClamp; break; case MIRRORED_REPEAT: texDesc.addressMode[1] = hipAddressModeMirror; break; case REPEAT: texDesc.addressMode[1] = hipAddressModeWrap; break; } switch (tex.sampler.minFilter) { case NEAREST: case NEAREST_MIPMAP_NEAREST: case NEAREST_MIPMAP_LINEAR: texDesc.filterMode = hipFilterModePoint; break; case LINEAR: case LINEAR_MIPMAP_NEAREST: case LINEAR_MIPMAP_LINEAR: texDesc.filterMode = hipFilterModeLinear; break; } texDesc.readMode = hipReadModeElementType; texDesc.normalizedCoords = 0; hipTextureObject_t texObj = 0; hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); cudaTextureData.push_back(cuArray); cudaTextures.push_back(texObj); texDim.push_back(glm::vec2(tex.width, tex.height)); count++; } hipMalloc(&dev_texDim, texDim.size() * sizeof(glm::vec2)); hipMemcpy(dev_texDim, texDim.data(), texDim.size() * sizeof(glm::vec2), hipMemcpyHostToDevice); hipMalloc(&dev_cudaTextures, cudaTextures.size() * sizeof(hipTextureObject_t)); hipMemcpy(dev_cudaTextures, cudaTextures.data(), cudaTextures.size() * sizeof(hipTextureObject_t), hipMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_paths_cache); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_lights); hipFree(dev_intersections); hipFree(dev_intersections_cache); hipFree(dev_gltfMateiral); hipFree(dev_texDim); // TODO: clean up any extra device memory you created hipFree(dev_mesh_idx); hipFree(dev_mesh_nor); hipFree(dev_mesh_pos); hipFree(dev_mesh_uv); // Octree 
hipFree(dev_octreeNode); hipFree(dev_octree); for (int i = 0; i < cudaTextureData.size(); i++) { hipFreeArray(cudaTextureData.at(i)); } cudaTextureData.clear(); cudaTextures.clear(); hipFree(dev_cudaTextures); checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int aaDim = sqrtf(AASAMPLENUM); int index = x + (y * cam.resolution.x * aaDim); x /= aaDim; y /= aaDim; if (x < cam.resolution.x && y < cam.resolution.y) { PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // TODO: implement antialiasing by jittering the ray thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); thrust::uniform_real_distribution<float> u01(0, 1); #ifdef ANTIALIASING segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x + u01(rng) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y + u01(rng) - (float)cam.resolution.y * 0.5f) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif #ifdef DEPTHOFFIELD // Depth of Field float lenX = u01(rng); float lenY = u01(rng); glm::vec2 pLens = cam.lensRadius * concentricSampling(glm::vec2(lenX, lenY)); float ft = cam.focalDistance / segment.ray.direction.z; glm::vec3 pFocus = segment.ray.origin + cam.focalDistance * segment.ray.direction; segment.ray.origin += glm::vec3(pLens.x, pLens.y, 0.0f); segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #endif segment.pixelIndex = x + y * cam.resolution.x; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
__global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , BoundingBox* bbs , int geoms_size , ShadeableIntersection * intersections , float* meshPos , float* meshNor , int* meshIdx , float* meshUV ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; glm::vec2 uv; glm::vec3 tangent; glm::vec3 bitangent; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool finalMesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; glm::vec2 tmp_uv; glm::vec3 tmp_tangent; glm::vec3 tmp_bitangent; intersections[path_index].isMesh = false; // naive parse through global geoms float isMesh = false; for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == MESH) { #ifdef BOUNDINGBOX // Bounding Box Test Ray modelRay; modelRay.origin = multiplyMV(geom.inverseTransform, glm::vec4(pathSegment.ray.origin, 1.0f)); modelRay.direction = glm::normalize(multiplyMV(geom.inverseTransform, glm::vec4(pathSegment.ray.direction, 0.0f))); Geom boundingBox; boundingBox.type = CUBE; int boundingIdx = geom.boundingIdx; boundingBox.translation = bbs[boundingIdx].boundingCenter; boundingBox.scale = bbs[boundingIdx].boundingScale; boundingBox.rotation = glm::vec3(0.0f, 0.0f, 0.0f); glm::mat4 translationMat = glm::translate(glm::mat4(), glm::vec3(0.0f, 0.0f, 0.0f)); glm::mat4 rotationMat = glm::rotate(glm::mat4(), boundingBox.rotation.x * (float)PI / 180, glm::vec3(1, 0, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), boundingBox.rotation.y * (float)PI / 180, glm::vec3(0, 1, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), boundingBox.rotation.z * (float)PI / 180, glm::vec3(0, 0, 1)); glm::mat4 scaleMat = glm::scale(glm::mat4(), boundingBox.scale); boundingBox.transform = translationMat * rotationMat * scaleMat; boundingBox.inverseTransform = glm::inverse(boundingBox.transform); boundingBox.invTranspose = glm::inverseTranspose(boundingBox.transform); glm::vec3 bond_intersect = glm::vec3(0.0f); glm::vec3 bond_normal = glm::vec3(0.0f); bool bond_outside = true; t = boxIntersectionTest(boundingBox, modelRay, bond_intersect, bond_normal, bond_outside); /*t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, meshPos, meshNor, meshIdx, geom.faceNum, geom.offset);*/ if (t != -1) { t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV, geom.faceNum, geom.offset, geom.posOffset); } #else t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV, geom.faceNum, geom.offset, geom.posOffset); #endif isMesh = true; } // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
if (t > 0.0f && t_min > t) { if (isMesh) { finalMesh = true; uv = tmp_uv; tangent = tmp_tangent; bitangent = tmp_bitangent; } else { finalMesh = false; } t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].uv = uv; intersections[path_index].isMesh = finalMesh; intersections[path_index].surfaceTangent = tangent; intersections[path_index].surfaceBiTangent = bitangent; } } } __global__ void rayOctreeIntersect( int depth , int num_paths , PathSegment* pathSegments , Geom* geoms , BoundingBox* bbs , int geoms_size , ShadeableIntersection* intersections , float* meshPos , float* meshNor , int* meshIdx , float* meshUV , Octree* octTree , OctreeNode* octreeNode , int* primOct , int* meshTriOct) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; int octStack[OCT_MAX_DEPTH + 1]; int childStack[OCT_MAX_DEPTH + 1]; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; intersections[path_index].isMesh = false; intersections[path_index].t = FLT_MAX; int top = 0; octStack[top] = 0; childStack[top] = 0; OctreeNode* curNode; do { int curNodeIndex = octStack[top]; OctreeNode* curNode = &(octreeNode[curNodeIndex]); //First Coming if (childStack[top] == 0) { glm::vec3 bond_intersect = glm::vec3(0.0f); glm::vec3 bond_normal = glm::vec3(0.0f); bool bond_outside = true; int octBoxT = boxIntersectionTest(curNode->octBlock, pathSegment.ray, bond_intersect, bond_normal, bond_outside); if (octBoxT != -1) { float geomT; glm::vec3 intersect_point; glm::vec3 normal; glm::vec2 uv; glm::vec3 tangent; glm::vec3 bitangent; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool finalMesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; glm::vec2 tmp_uv; glm::vec3 tmp_tangent; glm::vec3 tmp_bitangent; intersections[path_index].isMesh = false; // naive parse through global geoms float isMesh = false; for (int i = 0; i < curNode->primitiveCount; i++) { int geomIdx = primOct[curNode->primOffset + i]; Geom& geom = geoms[geomIdx]; if (geom.type == CUBE) { geomT = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == SPHERE) { geomT = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == MESH) { geomT = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV, geom.faceNum, geom.offset, geom.posOffset); isMesh = true; } // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
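				// Same closest-hit update as in computeIntersections, with one extra rule:
				// a hit is only accepted if its intersection point lies inside the current
				// octree node's bounds (pMin/pMax). A primitive that straddles several
				// nodes is therefore only counted in the node that actually contains the
				// hit point, so it is not found "early" through the wrong node.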
if (geomT > 0.0f && t_min >= geomT) { if (isMesh) { finalMesh = true; uv = tmp_uv; tangent = tmp_tangent; bitangent = tmp_bitangent; } else { finalMesh = false; } t_min = geomT; hit_geom_index = geomIdx; intersect_point = tmp_intersect; normal = tmp_normal; glm::vec3 pMin = curNode->boxCenter - glm::vec3(curNode->scale / 2.0f); glm::vec3 pMax = curNode->boxCenter + glm::vec3(curNode->scale / 2.0f); if (!(pMin.x < tmp_intersect.x && pMax.x > tmp_intersect.x && pMin.y < tmp_intersect.y && pMax.y > tmp_intersect.y && pMin.z < tmp_intersect.z && pMax.z > tmp_intersect.z)) { hit_geom_index = -1; } } } /*for (int i = 0; i < curNode->meshTriCount; i++) { int faceIdx = meshTriOct[curNode->meshTriOffset + i]; Geom meshGeom; int meshGeomIdx = -1; for (int j = 0; j < geoms_size; j++) { if (geoms[j].type != MESH) continue; else if (faceIdx < geoms[j].offset) { meshGeom = geoms[j - 1]; meshGeomIdx = j - 1; } } float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool finalMesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; glm::vec2 tmp_uv; glm::vec3 tmp_tangent; glm::vec3 tmp_bitangent; triIntersectionTest(meshGeom, faceIdx, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV); // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (geomT > 0.0f && t_min >= geomT) { if (isMesh) { finalMesh = true; uv = tmp_uv; tangent = tmp_tangent; bitangent = tmp_bitangent; } else { finalMesh = false; } t_min = geomT; hit_geom_index = meshGeomIdx; intersect_point = tmp_intersect; normal = tmp_normal; glm::vec3 pMin = curNode->boxCenter - glm::vec3(curNode->scale / 2.0f); glm::vec3 pMax = curNode->boxCenter + glm::vec3(curNode->scale / 2.0f); if (!(pMin.x < tmp_intersect.x && pMax.x > tmp_intersect.x && pMin.y < tmp_intersect.y && pMax.y > tmp_intersect.y && pMin.z < tmp_intersect.z && pMax.z > tmp_intersect.z)) { hit_geom_index = -1; } } }*/ if (hit_geom_index != -1 && intersections[path_index].t >= t_min) { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].uv = uv; intersections[path_index].isMesh = finalMesh; } if (curNode->childCount == 0) top--; else { for (int i = childStack[top]; i < 8; i++) { if (curNode->hasChild[i]) { childStack[top] = i + 1; top++; octStack[top] = curNode->nodeIndices[i]; childStack[top] = 0; break; } } } } else { top--; } } else { if (childStack[top] == 8) { top--; } else { for (int i = childStack[top]; i < 8; i++) { if (curNode->hasChild[i]) { childStack[top] = i + 1; top++; octStack[top] = curNode->nodeIndices[i]; childStack[top] = 0; break; } else if (i == 7) { childStack[top] = 8; } } } } } while (top >= 0); if (intersections[path_index].t == FLT_MAX) { intersections[path_index].t = -1.0f; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. 
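// Texture lookups in the shading kernels below use unnormalized coordinates
// (normalizedCoords is 0 in pathtraceInit), so uv is scaled by the texture's
// width/height taken from texDim. Texels are stored as 0..255 floats: base
// color is divided by 255, and normal-map texels are remapped to [-1, 1] via
// (texel - 128) / 128.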
__global__ void shadeMaterial( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials , hipTextureObject_t* cudaTexes , int materialSize , example::Material* gltfMaterials , glm::vec2* texDim ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material; bool isMesh = false; if (intersection.materialId > materialSize - 1) { isMesh = true; example::Material curMeshMaterial = gltfMaterials[intersection.materialId - materialSize]; int baseColorIndex = curMeshMaterial.base_texid; int normalIndex = curMeshMaterial.normal_texid; int metallicIndex = curMeshMaterial.metallic_roughness_texid; if (baseColorIndex != -1) { float width = texDim[baseColorIndex].x; float height = texDim[baseColorIndex].y; float4 color = tex2D<float4>(cudaTexes[baseColorIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); material.color = glm::vec3(color.x / 255.0f, color.y / 255.0f, color.z / 255.0f); material.specular.color = glm::vec3(color.x / 255.0f, color.y / 255.0f, color.z / 255.0f); } else { material.color = glm::vec3(0.98f, 0.98f, 0.98f); } if (normalIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 normal = tex2D<float4>(cudaTexes[normalIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); intersection.surfaceNormal = glm::vec3((normal.x - 128.0f) / 128.0f, (normal.y - 128.0f) / 128.0f, (normal.z - 128.0f) / 128.0f); } if (metallicIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 metallic = tex2D<float4>(cudaTexes[metallicIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); material.hasReflective = metallic.y / 255.0f; } material.hasRefractive = 0; material.emittance = 0; } else { isMesh = false; material = materials[intersection.materialId]; } glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { scatterRay(pathSegments[idx], getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, material, rng, isMesh); } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. 
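		// Setting remainingBounces to 0 (here and when a light is hit above) is what
		// marks a path as terminated; the stream-compaction step in pathtrace() later
		// partitions these paths out of the active set.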
} else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } __global__ void directLightShadeMaterial( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials , Geom* lights , int lightNum , hipTextureObject_t* cudaTexes , int materialSize , example::Material* gltfMaterials , glm::vec2* texDim ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material; if (intersection.materialId > materialSize - 1) { example::Material curMeshMaterial = gltfMaterials[intersection.materialId - materialSize]; int baseColorIndex = curMeshMaterial.base_texid; int normalIndex = curMeshMaterial.normal_texid; int metallicIndex = curMeshMaterial.metallic_roughness_texid; if (baseColorIndex != -1) { float width = texDim[baseColorIndex].x; float height = texDim[baseColorIndex].y; float4 color = tex2D<float4>(cudaTexes[baseColorIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); material.color = glm::vec3(color.x / 255.0f, color.y / 255.0f, color.z / 255.0f); } else { material.color = glm::vec3(0.98f, 0.98f, 0.98f); } if (normalIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 normal = tex2D<float4>(cudaTexes[normalIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); intersection.surfaceNormal = glm::vec3((normal.x - 128.0f) / 128.0f, (normal.y - 128.0f) / 128.0f, (normal.z - 128.0f) / 128.0f); } if (metallicIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 metallic = tex2D<float4>(cudaTexes[metallicIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); //material.hasReflective = metallic.x / 255.0f; } material.hasReflective = 0; material.hasRefractive = 0; material.emittance = 0; } else { material = materials[intersection.materialId]; } //material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { if (pathSegments[idx].remainingBounces != 1) { pathSegments[idx].color *= (materialColor * material.emittance); } else { pathSegments[idx].color *= (materialColor * material.emittance) / glm::length2(getPointOnRay(pathSegments[idx].ray, intersection.t) - pathSegments[idx].ray.origin) * fabs(glm::dot(intersection.surfaceNormal, pathSegments[idx].ray.direction)); } pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { if (pathSegments[idx].remainingBounces == 1) { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } else { pathSegments[idx].color = glm::vec3(0.0f); directRay(pathSegments[idx], getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, material, rng, lights, lightNum); } } // If there was no intersection, color the ray black. 
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; #ifdef ANTIALIASING iterationPath.color /= AASAMPLENUM; #endif image[iterationPath.pixelIndex] += iterationPath.color; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; int sampleCount = pixelcount; #ifdef ANTIALIASING sampleCount *= AASAMPLENUM; #endif // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); #ifdef ANTIALIASING const dim3 blocksPerGrid2d( (cam.resolution.x * sqrt(AASAMPLENUM) + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y * sqrt(AASAMPLENUM)+ blockSize2d.y - 1) / blockSize2d.y); #else const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); #endif // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
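	// What actually happens below: generate camera rays, then per bounce
	// {intersect (octree traversal or naive loop, with an optional cached first
	// bounce when CACHEBOUNCE is set), sort by material id, shade (direct-lighting
	// or full path-tracing variant), stream-compact terminated paths}, then
	// accumulate into dev_image and copy to the PBO. The whole iteration is timed
	// with events and summed into gpu_time_300_iter.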
// TODO: perform one iteration of path tracing generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); hipEvent_t event_start = nullptr; hipEvent_t event_end = nullptr; hipEventCreate(&event_start); hipEventCreate(&event_end); int depth = 0; PathSegment* dev_path_end = dev_paths + sampleCount; //the tail of path segment array int num_paths = dev_path_end - dev_paths; //is that the same as pixel count? -- no when antialiasing, do we need to change? int num_cur_paths = num_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; bool firstIteration = true; hipEventRecord(event_start); while (!iterationComplete) { // clean shading chunks hipMemset(dev_intersections, 0, sampleCount * sizeof(ShadeableIntersection)); dim3 numblocksPathSegmentTracing = (num_cur_paths + blockSize1d - 1) / blockSize1d; if (CACHEBOUNCE && depth == 0) { if (iter == 1) { #ifdef OCTREEACCEL rayOctreeIntersect << <numblocksPathSegmentTracing, blockSize1d >> >( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv , dev_octree , dev_octreeNode , dev_primsForOcts , dev_meshTriForOcts); #else // tracing computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv ); #endif checkCUDAError("trace one bounce"); hipDeviceSynchronize(); depth++; hipMemcpy(dev_paths_cache, dev_paths, sampleCount * sizeof(PathSegment), hipMemcpyDeviceToDevice); hipMemcpy(dev_intersections_cache, dev_intersections, sampleCount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } else { hipMemcpy(dev_paths, dev_paths_cache, sampleCount * sizeof(PathSegment), hipMemcpyDeviceToDevice); hipMemcpy(dev_intersections, dev_intersections_cache, sampleCount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); depth++; } } else { #ifdef OCTREEACCEL rayOctreeIntersect << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv , dev_octree , dev_octreeNode , dev_primsForOcts , dev_meshTriForOcts); #else computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv ); #endif checkCUDAError("trace one bounce"); hipDeviceSynchronize(); depth++; } // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
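		// Sorting by material id groups path segments that hit the same material, so
		// threads in a warp take the same branch inside the shading kernel and their
		// material reads are more coherent (this is the comparison the TODO above
		// asks for).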
// Sort Path with Matrial ID thrust::stable_sort_by_key(thrust::device, dev_intersections, dev_intersections + num_cur_paths, dev_paths, material_sort()); #ifdef DIRECTLIGHTING directLightShadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_lights, lightLen, dev_cudaTextures, materialSize, dev_gltfMateiral, dev_texDim ); #else shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_cudaTextures, materialSize, dev_gltfMateiral, dev_texDim ); #endif hipDeviceSynchronize(); // TODO: should be based off stream compaction results, and even shot more rays // update the dev_path and num_paths dev_path_end = thrust::partition( thrust::device_ptr<PathSegment>(dev_paths), thrust::device_ptr<PathSegment>(dev_path_end), is_terminated()).get(); //dev_path_end = thrust::remove_if(thrust::device, dev_paths, dev_path_end, is_terminated()); num_cur_paths = dev_path_end - dev_paths; if ((depth >= traceDepth) || num_cur_paths == 0) iterationComplete = true; } // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (sampleCount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> > (num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// const dim3 pixelsPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // Send results to OpenGL buffer for rendering sendImageToPBO << <pixelsPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image); hipEventRecord(event_end); hipEventSynchronize(event_end); float curIterTime = 0.0f; hipEventElapsedTime(&curIterTime, event_start, event_end); gpu_time_300_iter += curIterTime; if (iter == 300) { std::cout << "300 Iter Elapse Time: " << gpu_time_300_iter << "ms"; } // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
46ff50c9ee5865fd180156ccfa6a9044f8ee6868.cu
#include <cstdio> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <thrust/partition.h> #include <thrust/random.h> #include <thrust/remove.h> #include <glm/gtc/matrix_inverse.hpp> #include <glm/gtc/matrix_transform.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #include "macros.h" static float gpu_time_300_iter = 0.0f; #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static int materialSize = 0; static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static Geom * dev_lights = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; static PathSegment* dev_paths_cache = NULL; static PathSegment* dev_intersections_cache = NULL; static BoundingBox* dev_bounding_box = NULL; static std::vector<cudaTextureObject_t> cudaTextures; static std::vector<cudaArray*> cudaTextureData; static glm::vec2* dev_texDim = NULL; static cudaTextureObject_t* dev_cudaTextures = NULL; static example::Material* dev_gltfMateiral = NULL; static Octree* dev_octree = NULL; static OctreeNode* dev_octreeNode = NULL; static int* dev_primsForOcts = NULL; static int* dev_meshTriForOcts = NULL; static int lightLen = 0; const static int depthSize = 0; cudaTextureObject_t texTest; // Mesh Loading static float* dev_mesh_pos = NULL; static float* dev_mesh_nor = NULL; static int* dev_mesh_idx = NULL; static float* dev_mesh_uv = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
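// The pathtrace() function further below sorts intersections with a
// material_sort comparator and stream-compacts paths with an is_terminated
// predicate; both functors are defined elsewhere in the project and are not
// shown in this file. The structs below are only a hedged sketch of what they
// plausibly look like, with _sketch names so they cannot collide with the real
// definitions; they are not referenced anywhere.
struct material_sort_sketch {
    __host__ __device__ bool operator()(const ShadeableIntersection& a,
                                        const ShadeableIntersection& b) const {
        // Group path segments that hit the same material so the shading kernel
        // diverges less within a warp.
        return a.materialId < b.materialId;
    }
};
struct is_terminated_sketch {
    __host__ __device__ bool operator()(const PathSegment& p) const {
        // thrust::partition keeps elements for which the predicate returns true
        // at the front, so "still alive" paths must return true here.
        return p.remainingBounces > 0;
    }
};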
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; int sampleCount = pixelcount; materialSize = scene->materials.size(); #ifdef ANTIALIASING sampleCount *= AASAMPLENUM; #endif lightLen = scene->lights.size(); cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, sampleCount * sizeof(PathSegment)); cudaMalloc(&dev_paths_cache, sampleCount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_lights, scene->lights.size() * sizeof(Geom)); cudaMemcpy(dev_lights, scene->lights.data(), scene->lights.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_bounding_box, scene->boundingBoxes.size() * sizeof(BoundingBox)); cudaMemcpy(dev_bounding_box, scene->boundingBoxes.data(), scene->boundingBoxes.size() * sizeof(BoundingBox), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, sampleCount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, sampleCount * sizeof(ShadeableIntersection)); cudaMalloc(&dev_intersections_cache, sampleCount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections_cache, 0, sampleCount * sizeof(ShadeableIntersection)); cudaMalloc(&dev_gltfMateiral, scene->gltfMaterials.size() * sizeof(example::Material)); cudaMemcpy(dev_gltfMateiral, scene->gltfMaterials.data(), scene->gltfMaterials.size() * sizeof(example::Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_octree, sizeof(scene->octree)); cudaMemcpy(dev_octree, &scene->octree, sizeof(scene->octree), cudaMemcpyHostToDevice); cudaMalloc(&dev_octreeNode, sizeof(OctreeNode) * scene->octree.nodeData.size()); cudaMemcpy(dev_octreeNode, scene->octree.nodeData.data(), sizeof(OctreeNode) * scene->octree.nodeData.size(), cudaMemcpyHostToDevice); // TODO: initialize any extra device memeory you need cudaMalloc(&dev_mesh_idx, scene->faceCount * 3 * sizeof(int)); cudaMalloc(&dev_mesh_pos, scene->posCount * sizeof(float)); cudaMalloc(&dev_mesh_nor, scene->faceCount * 3 * 3 * sizeof(float)); cudaMalloc(&dev_mesh_uv, scene->faceCount * 2 * 3 * sizeof(float)); int curOffset = 0; int curPosOffset = 0; for (int i = 0; i < scene->meshes.size(); i++) { for (int j = 0; j < scene->meshes.at(i).size(); j++) { int stride = scene->meshes.at(i).at(j).faces.size() / (scene->meshes.at(i).at(j).stride / sizeof(float)); int curPosNum = scene->meshes.at(i).at(j).vertices.size(); int final = scene->meshes.at(i).at(j).faces.at(stride * 3 - 1); cudaMemcpy(dev_mesh_idx + curOffset * 3, scene->meshes.at(i).at(j).faces.data(), stride * 3 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_mesh_pos + curPosOffset, scene->meshes.at(i).at(j).vertices.data(), curPosNum * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_mesh_nor + curOffset * 3 * 3, scene->meshes.at(i).at(j).facevarying_normals.data(), stride * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice); //Load UV cudaMemcpy(dev_mesh_uv + curOffset * 2 * 3, scene->meshes.at(i).at(j).facevarying_uvs.data(), stride * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice); curOffset += stride; curPosOffset += curPosNum; } } // Load Prim and MeshTri Oct 
Data cudaMalloc(&dev_primsForOcts, scene->octree.primitiveCount * sizeof(int)); cudaMalloc(&dev_meshTriForOcts, scene->octree.meshTriCount * sizeof(int)); int curPrimOffset = 0; int curMeshTriOffset = 0; for (int i = 0; i < scene->octree.nodeData.size(); i++) { cudaMemcpy(dev_primsForOcts + curPrimOffset, scene->octree.nodeData.at(i).primitiveIndices.data(), sizeof(int) * scene->octree.nodeData.at(i).primitiveCount, cudaMemcpyHostToDevice); cudaMemcpy(dev_meshTriForOcts + curMeshTriOffset, scene->octree.nodeData.at(i).meshTriangleIndices.data(), sizeof(int) * scene->octree.nodeData.at(i).meshTriCount, cudaMemcpyHostToDevice); curPrimOffset += scene->octree.nodeData.at(i).primitiveCount; curMeshTriOffset += scene->octree.nodeData.at(i).meshTriCount; } // Load Textures int count = 0; std::vector<glm::vec2> texDim; for (const auto &tex : scene->gltfTextures) { float4* texTmp = new float4[tex.height * tex.width]; for (int i = 0; i < tex.height * tex.width; i++) { texTmp[i].x = (float)tex.image[4 * i]; texTmp[i].y = (float)tex.image[4 * i + 1]; texTmp[i].z = (float)tex.image[4 * i + 2]; texTmp[i].w = (float)tex.image[4 * i + 3]; } cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>(); // Load Data cudaArray* cuArray; cudaMallocArray(&cuArray, &channelDesc, tex.width, tex.height); cudaMemcpyToArray(cuArray, 0, 0, texTmp, tex.height * tex.width * sizeof(float4), cudaMemcpyHostToDevice); // Specify Texture struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = cuArray; delete []texTmp; // Specify texture object parameters struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); switch (tex.sampler.wrapS) { case CLAMP_TO_EDGE: texDesc.addressMode[0] = cudaAddressModeClamp; break; case MIRRORED_REPEAT: texDesc.addressMode[0] = cudaAddressModeMirror; break; case REPEAT: texDesc.addressMode[0] = cudaAddressModeWrap; break; } switch (tex.sampler.wrapT) { case CLAMP_TO_EDGE: texDesc.addressMode[1] = cudaAddressModeClamp; break; case MIRRORED_REPEAT: texDesc.addressMode[1] = cudaAddressModeMirror; break; case REPEAT: texDesc.addressMode[1] = cudaAddressModeWrap; break; } switch (tex.sampler.minFilter) { case NEAREST: case NEAREST_MIPMAP_NEAREST: case NEAREST_MIPMAP_LINEAR: texDesc.filterMode = cudaFilterModePoint; break; case LINEAR: case LINEAR_MIPMAP_NEAREST: case LINEAR_MIPMAP_LINEAR: texDesc.filterMode = cudaFilterModeLinear; break; } texDesc.readMode = cudaReadModeElementType; texDesc.normalizedCoords = 0; cudaTextureObject_t texObj = 0; cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL); cudaTextureData.push_back(cuArray); cudaTextures.push_back(texObj); texDim.push_back(glm::vec2(tex.width, tex.height)); count++; } cudaMalloc(&dev_texDim, texDim.size() * sizeof(glm::vec2)); cudaMemcpy(dev_texDim, texDim.data(), texDim.size() * sizeof(glm::vec2), cudaMemcpyHostToDevice); cudaMalloc(&dev_cudaTextures, cudaTextures.size() * sizeof(cudaTextureObject_t)); cudaMemcpy(dev_cudaTextures, cudaTextures.data(), cudaTextures.size() * sizeof(cudaTextureObject_t), cudaMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_paths_cache); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_lights); cudaFree(dev_intersections); cudaFree(dev_intersections_cache); cudaFree(dev_gltfMateiral); cudaFree(dev_texDim); // TODO: clean up any extra device memory you created 
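	// Note: only the backing cudaArrays are freed below and the host-side vectors
	// cleared; the cudaTextureObject_t handles created in pathtraceInit are never
	// released with cudaDestroyTextureObject.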
cudaFree(dev_mesh_idx); cudaFree(dev_mesh_nor); cudaFree(dev_mesh_pos); cudaFree(dev_mesh_uv); // Octree cudaFree(dev_octreeNode); cudaFree(dev_octree); for (int i = 0; i < cudaTextureData.size(); i++) { cudaFreeArray(cudaTextureData.at(i)); } cudaTextureData.clear(); cudaTextures.clear(); cudaFree(dev_cudaTextures); checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int aaDim = sqrtf(AASAMPLENUM); int index = x + (y * cam.resolution.x * aaDim); x /= aaDim; y /= aaDim; if (x < cam.resolution.x && y < cam.resolution.y) { PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // TODO: implement antialiasing by jittering the ray thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); thrust::uniform_real_distribution<float> u01(0, 1); #ifdef ANTIALIASING segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x + u01(rng) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y + u01(rng) - (float)cam.resolution.y * 0.5f) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif #ifdef DEPTHOFFIELD // Depth of Field float lenX = u01(rng); float lenY = u01(rng); glm::vec2 pLens = cam.lensRadius * concentricSampling(glm::vec2(lenX, lenY)); float ft = cam.focalDistance / segment.ray.direction.z; glm::vec3 pFocus = segment.ray.origin + cam.focalDistance * segment.ray.direction; segment.ray.origin += glm::vec3(pLens.x, pLens.y, 0.0f); segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #endif segment.pixelIndex = x + y * cam.resolution.x; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
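// With BOUNDINGBOX defined, each MESH geom below is first tested against its
// axis-aligned bounding box in the mesh's model space (the ray is transformed
// by geom.inverseTransform); the per-triangle meshIntersectionTest only runs
// when that cheap box test reports a hit.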
__global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , BoundingBox* bbs , int geoms_size , ShadeableIntersection * intersections , float* meshPos , float* meshNor , int* meshIdx , float* meshUV ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; glm::vec2 uv; glm::vec3 tangent; glm::vec3 bitangent; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool finalMesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; glm::vec2 tmp_uv; glm::vec3 tmp_tangent; glm::vec3 tmp_bitangent; intersections[path_index].isMesh = false; // naive parse through global geoms float isMesh = false; for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == MESH) { #ifdef BOUNDINGBOX // Bounding Box Test Ray modelRay; modelRay.origin = multiplyMV(geom.inverseTransform, glm::vec4(pathSegment.ray.origin, 1.0f)); modelRay.direction = glm::normalize(multiplyMV(geom.inverseTransform, glm::vec4(pathSegment.ray.direction, 0.0f))); Geom boundingBox; boundingBox.type = CUBE; int boundingIdx = geom.boundingIdx; boundingBox.translation = bbs[boundingIdx].boundingCenter; boundingBox.scale = bbs[boundingIdx].boundingScale; boundingBox.rotation = glm::vec3(0.0f, 0.0f, 0.0f); glm::mat4 translationMat = glm::translate(glm::mat4(), glm::vec3(0.0f, 0.0f, 0.0f)); glm::mat4 rotationMat = glm::rotate(glm::mat4(), boundingBox.rotation.x * (float)PI / 180, glm::vec3(1, 0, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), boundingBox.rotation.y * (float)PI / 180, glm::vec3(0, 1, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), boundingBox.rotation.z * (float)PI / 180, glm::vec3(0, 0, 1)); glm::mat4 scaleMat = glm::scale(glm::mat4(), boundingBox.scale); boundingBox.transform = translationMat * rotationMat * scaleMat; boundingBox.inverseTransform = glm::inverse(boundingBox.transform); boundingBox.invTranspose = glm::inverseTranspose(boundingBox.transform); glm::vec3 bond_intersect = glm::vec3(0.0f); glm::vec3 bond_normal = glm::vec3(0.0f); bool bond_outside = true; t = boxIntersectionTest(boundingBox, modelRay, bond_intersect, bond_normal, bond_outside); /*t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, meshPos, meshNor, meshIdx, geom.faceNum, geom.offset);*/ if (t != -1) { t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV, geom.faceNum, geom.offset, geom.posOffset); } #else t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV, geom.faceNum, geom.offset, geom.posOffset); #endif isMesh = true; } // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
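	// Note: isMesh above is declared as float but only ever holds true/false
	// (it flags whether the winning hit needs uv/tangent data); a bool would
	// express the intent more directly.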
if (t > 0.0f && t_min > t) { if (isMesh) { finalMesh = true; uv = tmp_uv; tangent = tmp_tangent; bitangent = tmp_bitangent; } else { finalMesh = false; } t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].uv = uv; intersections[path_index].isMesh = finalMesh; intersections[path_index].surfaceTangent = tangent; intersections[path_index].surfaceBiTangent = bitangent; } } } __global__ void rayOctreeIntersect( int depth , int num_paths , PathSegment* pathSegments , Geom* geoms , BoundingBox* bbs , int geoms_size , ShadeableIntersection* intersections , float* meshPos , float* meshNor , int* meshIdx , float* meshUV , Octree* octTree , OctreeNode* octreeNode , int* primOct , int* meshTriOct) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; int octStack[OCT_MAX_DEPTH + 1]; int childStack[OCT_MAX_DEPTH + 1]; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; intersections[path_index].isMesh = false; intersections[path_index].t = FLT_MAX; int top = 0; octStack[top] = 0; childStack[top] = 0; OctreeNode* curNode; do { int curNodeIndex = octStack[top]; OctreeNode* curNode = &(octreeNode[curNodeIndex]); //First Coming if (childStack[top] == 0) { glm::vec3 bond_intersect = glm::vec3(0.0f); glm::vec3 bond_normal = glm::vec3(0.0f); bool bond_outside = true; int octBoxT = boxIntersectionTest(curNode->octBlock, pathSegment.ray, bond_intersect, bond_normal, bond_outside); if (octBoxT != -1) { float geomT; glm::vec3 intersect_point; glm::vec3 normal; glm::vec2 uv; glm::vec3 tangent; glm::vec3 bitangent; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool finalMesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; glm::vec2 tmp_uv; glm::vec3 tmp_tangent; glm::vec3 tmp_bitangent; intersections[path_index].isMesh = false; // naive parse through global geoms float isMesh = false; for (int i = 0; i < curNode->primitiveCount; i++) { int geomIdx = primOct[curNode->primOffset + i]; Geom& geom = geoms[geomIdx]; if (geom.type == CUBE) { geomT = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == SPHERE) { geomT = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); isMesh = false; } else if (geom.type == MESH) { geomT = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV, geom.faceNum, geom.offset, geom.posOffset); isMesh = true; } // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
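				// intersections[path_index].t starts at FLT_MAX and is only lowered when a
				// nearer hit is found in any visited node; after the traversal finishes it
				// is mapped back to -1.0f to signal a miss, matching computeIntersections.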
if (geomT > 0.0f && t_min >= geomT) { if (isMesh) { finalMesh = true; uv = tmp_uv; tangent = tmp_tangent; bitangent = tmp_bitangent; } else { finalMesh = false; } t_min = geomT; hit_geom_index = geomIdx; intersect_point = tmp_intersect; normal = tmp_normal; glm::vec3 pMin = curNode->boxCenter - glm::vec3(curNode->scale / 2.0f); glm::vec3 pMax = curNode->boxCenter + glm::vec3(curNode->scale / 2.0f); if (!(pMin.x < tmp_intersect.x && pMax.x > tmp_intersect.x && pMin.y < tmp_intersect.y && pMax.y > tmp_intersect.y && pMin.z < tmp_intersect.z && pMax.z > tmp_intersect.z)) { hit_geom_index = -1; } } } /*for (int i = 0; i < curNode->meshTriCount; i++) { int faceIdx = meshTriOct[curNode->meshTriOffset + i]; Geom meshGeom; int meshGeomIdx = -1; for (int j = 0; j < geoms_size; j++) { if (geoms[j].type != MESH) continue; else if (faceIdx < geoms[j].offset) { meshGeom = geoms[j - 1]; meshGeomIdx = j - 1; } } float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool finalMesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; glm::vec2 tmp_uv; glm::vec3 tmp_tangent; glm::vec3 tmp_bitangent; triIntersectionTest(meshGeom, faceIdx, pathSegment.ray, tmp_intersect, tmp_normal, tmp_uv, outside, tmp_tangent, tmp_bitangent, meshPos, meshNor, meshIdx, meshUV); // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (geomT > 0.0f && t_min >= geomT) { if (isMesh) { finalMesh = true; uv = tmp_uv; tangent = tmp_tangent; bitangent = tmp_bitangent; } else { finalMesh = false; } t_min = geomT; hit_geom_index = meshGeomIdx; intersect_point = tmp_intersect; normal = tmp_normal; glm::vec3 pMin = curNode->boxCenter - glm::vec3(curNode->scale / 2.0f); glm::vec3 pMax = curNode->boxCenter + glm::vec3(curNode->scale / 2.0f); if (!(pMin.x < tmp_intersect.x && pMax.x > tmp_intersect.x && pMin.y < tmp_intersect.y && pMax.y > tmp_intersect.y && pMin.z < tmp_intersect.z && pMax.z > tmp_intersect.z)) { hit_geom_index = -1; } } }*/ if (hit_geom_index != -1 && intersections[path_index].t >= t_min) { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].uv = uv; intersections[path_index].isMesh = finalMesh; } if (curNode->childCount == 0) top--; else { for (int i = childStack[top]; i < 8; i++) { if (curNode->hasChild[i]) { childStack[top] = i + 1; top++; octStack[top] = curNode->nodeIndices[i]; childStack[top] = 0; break; } } } } else { top--; } } else { if (childStack[top] == 8) { top--; } else { for (int i = childStack[top]; i < 8; i++) { if (curNode->hasChild[i]) { childStack[top] = i + 1; top++; octStack[top] = curNode->nodeIndices[i]; childStack[top] = 0; break; } else if (i == 7) { childStack[top] = 8; } } } } } while (top >= 0); if (intersections[path_index].t == FLT_MAX) { intersections[path_index].t = -1.0f; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. 
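// For mesh hits, materialId values >= materialSize index into the glTF
// material array (materialId - materialSize) and a temporary Material is
// assembled from the base color, normal and metallic-roughness textures.
// Note: the metallic-roughness lookup reads its dimensions from
// texDim[normalIndex] rather than texDim[metallicIndex] (probably an
// oversight) and uses the texel's green channel as the reflectivity weight.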
__global__ void shadeMaterial( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials , cudaTextureObject_t* cudaTexes , int materialSize , example::Material* gltfMaterials , glm::vec2* texDim ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material; bool isMesh = false; if (intersection.materialId > materialSize - 1) { isMesh = true; example::Material curMeshMaterial = gltfMaterials[intersection.materialId - materialSize]; int baseColorIndex = curMeshMaterial.base_texid; int normalIndex = curMeshMaterial.normal_texid; int metallicIndex = curMeshMaterial.metallic_roughness_texid; if (baseColorIndex != -1) { float width = texDim[baseColorIndex].x; float height = texDim[baseColorIndex].y; float4 color = tex2D<float4>(cudaTexes[baseColorIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); material.color = glm::vec3(color.x / 255.0f, color.y / 255.0f, color.z / 255.0f); material.specular.color = glm::vec3(color.x / 255.0f, color.y / 255.0f, color.z / 255.0f); } else { material.color = glm::vec3(0.98f, 0.98f, 0.98f); } if (normalIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 normal = tex2D<float4>(cudaTexes[normalIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); intersection.surfaceNormal = glm::vec3((normal.x - 128.0f) / 128.0f, (normal.y - 128.0f) / 128.0f, (normal.z - 128.0f) / 128.0f); } if (metallicIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 metallic = tex2D<float4>(cudaTexes[metallicIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); material.hasReflective = metallic.y / 255.0f; } material.hasRefractive = 0; material.emittance = 0; } else { isMesh = false; material = materials[intersection.materialId]; } glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { scatterRay(pathSegments[idx], getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, material, rng, isMesh); } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. 
} else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } __global__ void directLightShadeMaterial( int iter , int num_paths , ShadeableIntersection* shadeableIntersections , PathSegment* pathSegments , Material* materials , Geom* lights , int lightNum , cudaTextureObject_t* cudaTexes , int materialSize , example::Material* gltfMaterials , glm::vec2* texDim ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material; if (intersection.materialId > materialSize - 1) { example::Material curMeshMaterial = gltfMaterials[intersection.materialId - materialSize]; int baseColorIndex = curMeshMaterial.base_texid; int normalIndex = curMeshMaterial.normal_texid; int metallicIndex = curMeshMaterial.metallic_roughness_texid; if (baseColorIndex != -1) { float width = texDim[baseColorIndex].x; float height = texDim[baseColorIndex].y; float4 color = tex2D<float4>(cudaTexes[baseColorIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); material.color = glm::vec3(color.x / 255.0f, color.y / 255.0f, color.z / 255.0f); } else { material.color = glm::vec3(0.98f, 0.98f, 0.98f); } if (normalIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 normal = tex2D<float4>(cudaTexes[normalIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); intersection.surfaceNormal = glm::vec3((normal.x - 128.0f) / 128.0f, (normal.y - 128.0f) / 128.0f, (normal.z - 128.0f) / 128.0f); } if (metallicIndex != -1) { float width = texDim[normalIndex].x; float height = texDim[normalIndex].y; float4 metallic = tex2D<float4>(cudaTexes[metallicIndex], (intersection.uv.x) * width, (intersection.uv.y) * height); //material.hasReflective = metallic.x / 255.0f; } material.hasReflective = 0; material.hasRefractive = 0; material.emittance = 0; } else { material = materials[intersection.materialId]; } //material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { if (pathSegments[idx].remainingBounces != 1) { pathSegments[idx].color *= (materialColor * material.emittance); } else { pathSegments[idx].color *= (materialColor * material.emittance) / glm::length2(getPointOnRay(pathSegments[idx].ray, intersection.t) - pathSegments[idx].ray.origin) * fabs(glm::dot(intersection.surfaceNormal, pathSegments[idx].ray.direction)); } pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { if (pathSegments[idx].remainingBounces == 1) { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } else { pathSegments[idx].color = glm::vec3(0.0f); directRay(pathSegments[idx], getPointOnRay(pathSegments[idx].ray, intersection.t), intersection.surfaceNormal, material, rng, lights, lightNum); } } // If there was no intersection, color the ray black. 
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; #ifdef ANTIALIASING iterationPath.color /= AASAMPLENUM; #endif image[iterationPath.pixelIndex] += iterationPath.color; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera& cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; int sampleCount = pixelcount; #ifdef ANTIALIASING sampleCount *= AASAMPLENUM; #endif // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); #ifdef ANTIALIASING const dim3 blocksPerGrid2d( (cam.resolution.x * sqrt(AASAMPLENUM) + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y * sqrt(AASAMPLENUM)+ blockSize2d.y - 1) / blockSize2d.y); #else const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); #endif // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
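	// With ANTIALIASING enabled, sampleCount = pixelcount * AASAMPLENUM and the 2D
	// launch grid below is scaled by sqrt(AASAMPLENUM) in each dimension, so every
	// pixel gets AASAMPLENUM jittered camera rays; finalGather divides each path's
	// color by AASAMPLENUM before accumulating it into dev_image.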
// TODO: perform one iteration of path tracing generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); cudaEvent_t event_start = nullptr; cudaEvent_t event_end = nullptr; cudaEventCreate(&event_start); cudaEventCreate(&event_end); int depth = 0; PathSegment* dev_path_end = dev_paths + sampleCount; //the tail of path segment array int num_paths = dev_path_end - dev_paths; //is that the same as pixel count? -- no when antialiasing, do we need to change? int num_cur_paths = num_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; bool firstIteration = true; cudaEventRecord(event_start); while (!iterationComplete) { // clean shading chunks cudaMemset(dev_intersections, 0, sampleCount * sizeof(ShadeableIntersection)); dim3 numblocksPathSegmentTracing = (num_cur_paths + blockSize1d - 1) / blockSize1d; if (CACHEBOUNCE && depth == 0) { if (iter == 1) { #ifdef OCTREEACCEL rayOctreeIntersect << <numblocksPathSegmentTracing, blockSize1d >> >( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv , dev_octree , dev_octreeNode , dev_primsForOcts , dev_meshTriForOcts); #else // tracing computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv ); #endif checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; cudaMemcpy(dev_paths_cache, dev_paths, sampleCount * sizeof(PathSegment), cudaMemcpyDeviceToDevice); cudaMemcpy(dev_intersections_cache, dev_intersections, sampleCount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } else { cudaMemcpy(dev_paths, dev_paths_cache, sampleCount * sizeof(PathSegment), cudaMemcpyDeviceToDevice); cudaMemcpy(dev_intersections, dev_intersections_cache, sampleCount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); depth++; } } else { #ifdef OCTREEACCEL rayOctreeIntersect << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv , dev_octree , dev_octreeNode , dev_primsForOcts , dev_meshTriForOcts); #else computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , dev_bounding_box , hst_scene->geoms.size() , dev_intersections , dev_mesh_pos , dev_mesh_nor , dev_mesh_idx , dev_mesh_uv ); #endif checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; } // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
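		// After the shading kernel below, thrust::partition moves still-active paths
		// (those with bounces remaining) to the front of dev_paths; num_cur_paths
		// shrinks every bounce and the loop ends once it reaches zero or depth hits
		// traceDepth.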
// Sort Path with Matrial ID thrust::stable_sort_by_key(thrust::device, dev_intersections, dev_intersections + num_cur_paths, dev_paths, material_sort()); #ifdef DIRECTLIGHTING directLightShadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_lights, lightLen, dev_cudaTextures, materialSize, dev_gltfMateiral, dev_texDim ); #else shadeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_cudaTextures, materialSize, dev_gltfMateiral, dev_texDim ); #endif cudaDeviceSynchronize(); // TODO: should be based off stream compaction results, and even shot more rays // update the dev_path and num_paths dev_path_end = thrust::partition( thrust::device_ptr<PathSegment>(dev_paths), thrust::device_ptr<PathSegment>(dev_path_end), is_terminated()).get(); //dev_path_end = thrust::remove_if(thrust::device, dev_paths, dev_path_end, is_terminated()); num_cur_paths = dev_path_end - dev_paths; if ((depth >= traceDepth) || num_cur_paths == 0) iterationComplete = true; } // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (sampleCount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> > (num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// const dim3 pixelsPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // Send results to OpenGL buffer for rendering sendImageToPBO << <pixelsPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image); cudaEventRecord(event_end); cudaEventSynchronize(event_end); float curIterTime = 0.0f; cudaEventElapsedTime(&curIterTime, event_start, event_end); gpu_time_300_iter += curIterTime; if (iter == 300) { std::cout << "300 Iter Elapse Time: " << gpu_time_300_iter << "ms"; } // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
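// Editor's note (not part of either file above or below): a minimal sketch of the
// functor types that the pathtrace() loop hands to Thrust. The real definitions live
// in a header that is not shown here, and the materialId field name is assumed;
// remainingBounces comes from the shading code above. material_sort orders the
// intersections (and, via stable_sort_by_key, their paired PathSegments) by material
// so shading touches contiguous material data. Because thrust::partition moves the
// elements for which the predicate is true to the front, and the code counts that
// front group as num_cur_paths, is_terminated has to return true for paths that are
// still alive, despite its name.
struct material_sort
{
    __host__ __device__
    bool operator()(const ShadeableIntersection& a, const ShadeableIntersection& b) const
    {
        return a.materialId < b.materialId;   // materialId assumed; used only as sort key
    }
};

struct is_terminated
{
    __host__ __device__
    bool operator()(const PathSegment& p) const
    {
        return p.remainingBounces > 0;        // keep live paths in the front partition
    }
};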
901eb1f53f8bb9976c64d1698631bd2f23c32fd2.hip
// !!! This is a file automatically generated by hipify!!! #include "kernels/tensor_operators.h" #include "training/graph_group_sync.h" namespace marian { void SyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) { scheduler_ = scheduler; // optimizer has to be registered last to see changes of learning rate scheduler_->registerTrainingObserver(scheduler_); for(auto opt : shardOpt_) scheduler_->registerTrainingObserver(opt); } void SyncGraphGroup::updateMovingAverage(Tensor paramsAvg, Tensor params, size_t batches) { using namespace functional; float decay = ::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10)); Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params); } void SyncGraphGroup::fetchParams(Tensor oldParams, const std::vector<Tensor>& params) { // @TODO read guard on parameters int pos = 0; std::vector<std::thread> threads; for(int idx = 0; idx < devices_.size(); idx++) { threads.emplace_back(std::thread( [=](int idx, int pos) { oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]); }, idx, pos)); pos += shardSize_; } for(auto&& t : threads) { t.join(); } } void SyncGraphGroup::execute(Ptr<data::Batch> batch) { std::vector<Ptr<data::Batch>> batches = batch->split(devices_.size()); if(first_) { for(size_t i = 0; i < graphs_.size(); ++i) { // takes care of thead_local stuff THREAD_GUARD(builders_[i]->build(graphs_[i], batches[0]); graphs_[i]->forward();); if(i > 0) graphs_[i]->params()->vals()->copyFrom(graphs_[0]->params()->vals()); } if(params_.size() == 0) { int totalSize = graphs_[0]->params()->vals()->size(); shardSize_ = ceil(totalSize / (float)devices_.size()); int pos = 0; for(auto device : devices_) { int __size__ = min(shardSize_, totalSize); auto paramsAlloc = New<TensorAllocator>(device); paramsAllocs_.push_back(paramsAlloc); paramsAlloc->reserveExact(3 * __size__ * sizeof(float)); Tensor param, grad, tmp; paramsAlloc->allocate(param, {1, __size__}); paramsAlloc->allocate(grad, {1, __size__}); paramsAlloc->allocate(tmp, {1, __size__}); params_.push_back(param); grads_.push_back(grad); tmpTensors_.push_back(tmp); param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__)); pos += __size__; totalSize -= __size__; } } if(movingAvg_ && paramsAvg_.size() == 0) { int totalSize = graphs_[0]->params()->vals()->size(); int i = 0; for(auto device : devices_) { int __size__ = min(shardSize_, totalSize); totalSize -= __size__; Tensor paramAvg; auto allocator = New<TensorAllocator>(device); allocator->reserveExact(__size__ * sizeof(float)); allocator->allocate(paramAvg, {1, __size__}); paramAvg->copyFrom(params_[i++]); paramsAllocAvg_.push_back(allocator); paramsAvg_.push_back(paramAvg); } } first_ = false; } std::vector<float> costs(devices_.size()); { auto task = [this, &costs, batches](size_t idx) { auto graph = graphs_[idx]; auto batch = batches[idx]; if(batch->size() > 0) { auto costNode = builders_[idx]->build(graph, batch); graph->forward(); costs[idx] = costNode->scalar(); graph->backward(); } }; ThreadPool pool(devices_.size(), devices_.size()); for(int idx = 0; idx < batches.size(); ++idx) pool.enqueue(task, idx); } { auto task = [this, batches](size_t idx, int pos) { grads_[idx]->set(0); int size = params_[idx]->size(); int i = 0; for(auto graph : graphs_) { if(batches[i]->size() > 0) { auto subGrad = graph->params()->grads()->subtensor(pos, size); tmpTensors_[idx]->copyFrom(subGrad); using namespace functional; Element(_1 = _1 + _2, grads_[idx], tmpTensors_[idx]); } i++; } shardOpt_[idx]->update(params_[idx], 
grads_[idx]); if(movingAvg_) updateMovingAverage( paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches()); for(auto graph : graphs_) { auto subParam = graph->params()->vals()->subtensor(pos, size); subParam->copyFrom(params_[idx]); } }; ThreadPool pool(devices_.size(), devices_.size()); int pos = 0; for(int idx = 0; idx < devices_.size(); ++idx) { pool.enqueue(task, idx, pos); pos += params_[idx]->size(); } } float cost = 0; for(auto c : costs) cost += c; cost = cost / costs.size(); if(scheduler_) { scheduler_->update(cost, batch); if(scheduler_->saving()) { this->save(); } if(scheduler_->validating()) { if(movingAvg_) for(auto graph : graphs_) fetchParams(graph->params()->vals(), paramsAvg_); // safe, because all graphs are idle during validation with sync sgd scheduler_->validate(graphs_); if(movingAvg_) for(auto graph : graphs_) fetchParams(graph->params()->vals(), params_); } } } }
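// Editor's note: a tiny standalone host sketch (illustrative only, mirroring the
// arithmetic in SyncGraphGroup above) of how the shard size and the moving-average
// decay behave. shardSize_ is the ceiling of totalSize / #devices and the last shard
// is clipped with min(); the term 1 - (batches+1)/(batches+10) starts at 0.9 and
// shrinks toward 0, so decay settles at mvDecay_ once that term drops below it
// (immediately, if mvDecay_ >= 0.9). All numbers below are made up.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  int totalSize = 10;                                            // pretend parameter count
  int devices   = 3;
  int shardSize = (int)std::ceil(totalSize / (float)devices);    // -> 4
  for (int remaining = totalSize; remaining > 0; remaining -= shardSize)
    std::printf("shard of %d elements\n", std::min(shardSize, remaining));   // 4, 4, 2

  float mvDecay = 1e-4f;                                         // assumed small value
  for (long batches : {0L, 9L, 90L, 9000L}) {
    float decay = std::max(mvDecay, 1.f - (float)(batches + 1) / (float)(batches + 10));
    std::printf("batches=%ld decay=%g\n", batches, decay);       // 0.9, ~0.47, 0.09, ~0.001
  }
  return 0;
}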
901eb1f53f8bb9976c64d1698631bd2f23c32fd2.cu
#include "kernels/tensor_operators.h" #include "training/graph_group_sync.h" namespace marian { void SyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) { scheduler_ = scheduler; // optimizer has to be registered last to see changes of learning rate scheduler_->registerTrainingObserver(scheduler_); for(auto opt : shardOpt_) scheduler_->registerTrainingObserver(opt); } void SyncGraphGroup::updateMovingAverage(Tensor paramsAvg, Tensor params, size_t batches) { using namespace functional; float decay = std::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10)); Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params); } void SyncGraphGroup::fetchParams(Tensor oldParams, const std::vector<Tensor>& params) { // @TODO read guard on parameters int pos = 0; std::vector<std::thread> threads; for(int idx = 0; idx < devices_.size(); idx++) { threads.emplace_back(std::thread( [=](int idx, int pos) { oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]); }, idx, pos)); pos += shardSize_; } for(auto&& t : threads) { t.join(); } } void SyncGraphGroup::execute(Ptr<data::Batch> batch) { std::vector<Ptr<data::Batch>> batches = batch->split(devices_.size()); if(first_) { for(size_t i = 0; i < graphs_.size(); ++i) { // takes care of thead_local stuff THREAD_GUARD(builders_[i]->build(graphs_[i], batches[0]); graphs_[i]->forward();); if(i > 0) graphs_[i]->params()->vals()->copyFrom(graphs_[0]->params()->vals()); } if(params_.size() == 0) { int totalSize = graphs_[0]->params()->vals()->size(); shardSize_ = ceil(totalSize / (float)devices_.size()); int pos = 0; for(auto device : devices_) { int __size__ = min(shardSize_, totalSize); auto paramsAlloc = New<TensorAllocator>(device); paramsAllocs_.push_back(paramsAlloc); paramsAlloc->reserveExact(3 * __size__ * sizeof(float)); Tensor param, grad, tmp; paramsAlloc->allocate(param, {1, __size__}); paramsAlloc->allocate(grad, {1, __size__}); paramsAlloc->allocate(tmp, {1, __size__}); params_.push_back(param); grads_.push_back(grad); tmpTensors_.push_back(tmp); param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__)); pos += __size__; totalSize -= __size__; } } if(movingAvg_ && paramsAvg_.size() == 0) { int totalSize = graphs_[0]->params()->vals()->size(); int i = 0; for(auto device : devices_) { int __size__ = min(shardSize_, totalSize); totalSize -= __size__; Tensor paramAvg; auto allocator = New<TensorAllocator>(device); allocator->reserveExact(__size__ * sizeof(float)); allocator->allocate(paramAvg, {1, __size__}); paramAvg->copyFrom(params_[i++]); paramsAllocAvg_.push_back(allocator); paramsAvg_.push_back(paramAvg); } } first_ = false; } std::vector<float> costs(devices_.size()); { auto task = [this, &costs, batches](size_t idx) { auto graph = graphs_[idx]; auto batch = batches[idx]; if(batch->size() > 0) { auto costNode = builders_[idx]->build(graph, batch); graph->forward(); costs[idx] = costNode->scalar(); graph->backward(); } }; ThreadPool pool(devices_.size(), devices_.size()); for(int idx = 0; idx < batches.size(); ++idx) pool.enqueue(task, idx); } { auto task = [this, batches](size_t idx, int pos) { grads_[idx]->set(0); int size = params_[idx]->size(); int i = 0; for(auto graph : graphs_) { if(batches[i]->size() > 0) { auto subGrad = graph->params()->grads()->subtensor(pos, size); tmpTensors_[idx]->copyFrom(subGrad); using namespace functional; Element(_1 = _1 + _2, grads_[idx], tmpTensors_[idx]); } i++; } shardOpt_[idx]->update(params_[idx], grads_[idx]); if(movingAvg_) updateMovingAverage( 
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches()); for(auto graph : graphs_) { auto subParam = graph->params()->vals()->subtensor(pos, size); subParam->copyFrom(params_[idx]); } }; ThreadPool pool(devices_.size(), devices_.size()); int pos = 0; for(int idx = 0; idx < devices_.size(); ++idx) { pool.enqueue(task, idx, pos); pos += params_[idx]->size(); } } float cost = 0; for(auto c : costs) cost += c; cost = cost / costs.size(); if(scheduler_) { scheduler_->update(cost, batch); if(scheduler_->saving()) { this->save(); } if(scheduler_->validating()) { if(movingAvg_) for(auto graph : graphs_) fetchParams(graph->params()->vals(), paramsAvg_); // safe, because all graphs are idle during validation with sync sgd scheduler_->validate(graphs_); if(movingAvg_) for(auto graph : graphs_) fetchParams(graph->params()->vals(), params_); } } } }
22b71e98040216c3cbfb4d0a29dc0a4f20ff7b03.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Gaussian Elimination Function
__global__ void ForwardElimKernel(double d_A[3][4], double d_piv[3], int nDim);

void GaussElim(double h_A[3][4], double h_b[3], double h_x[3], const int nDim)
{
    double d_A[3][4], d_piv[3];

    // Allocate memory on device (still a placeholder)
    //hipMalloc(d_A, sizeof(float)*(numvar)*(numvar+1));
    //hipMalloc(d_piv, sizeof(float)*(numvar)*(numvar+1));

    // Copy data from host to device (still a placeholder)
    //hipMemcpy(a_d, temp_h, sizeof(float)*numvar*(numvar+1), hipMemcpyHostToDevice);

    // Define thread block size
    dim3 dimBlock(nDim + 1, nDim, 1);
    dim3 dimGrid(1, 1, 1);

    // Forward elimination kernel
    hipLaunchKernelGGL((ForwardElimKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_piv, nDim);

    // Copy data from device to host (still a placeholder)
    //hipMemcpy(temp1_h, b_d, sizeof(float)*numvar*(numvar+1), hipMemcpyDeviceToHost);

    // Free memory on device
    hipFree(d_A);
    hipFree(d_piv);

    // Backward substitution
    for (int i = 0; i < nDim; i++)
        h_b[i] = h_A[i][nDim];
    for (int i = nDim - 1; i >= 0; i--) {
        h_x[i] = h_b[i];
        for (int j = nDim - 1; j >= i + 1; j--)
            h_x[i] = h_x[i] - h_A[i][j] * h_x[j];
        h_x[i] = h_x[i] / h_A[i][i];
    }
}
22b71e98040216c3cbfb4d0a29dc0a4f20ff7b03.cu
// Gaussian Elimination Function
__global__ void ForwardElimKernel(double d_A[3][4], double d_piv[3], int nDim);

void GaussElim(double h_A[3][4], double h_b[3], double h_x[3], const int nDim)
{
    double d_A[3][4], d_piv[3];

    // Allocate memory on device (still a placeholder)
    //cudaMalloc(d_A, sizeof(float)*(numvar)*(numvar+1));
    //cudaMalloc(d_piv, sizeof(float)*(numvar)*(numvar+1));

    // Copy data from host to device (still a placeholder)
    //cudaMemcpy(a_d, temp_h, sizeof(float)*numvar*(numvar+1), cudaMemcpyHostToDevice);

    // Define thread block size
    dim3 dimBlock(nDim + 1, nDim, 1);
    dim3 dimGrid(1, 1, 1);

    // Forward elimination kernel
    ForwardElimKernel<<<dimGrid, dimBlock>>>(d_A, d_piv, nDim);

    // Copy data from device to host (still a placeholder)
    //cudaMemcpy(temp1_h, b_d, sizeof(float)*numvar*(numvar+1), cudaMemcpyDeviceToHost);

    // Free memory on device
    cudaFree(d_A);
    cudaFree(d_piv);

    // Backward substitution
    for (int i = 0; i < nDim; i++)
        h_b[i] = h_A[i][nDim];
    for (int i = nDim - 1; i >= 0; i--) {
        h_x[i] = h_b[i];
        for (int j = nDim - 1; j >= i + 1; j--)
            h_x[i] = h_x[i] - h_A[i][j] * h_x[j];
        h_x[i] = h_x[i] / h_A[i][i];
    }
}
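// Editor's note: ForwardElimKernel is only declared in this file; no definition is
// visible in either the .hip or the .cu version. The kernel below is one possible
// single-block implementation matching the dimBlock(nDim+1, nDim, 1) launch shape
// used in GaussElim, offered purely as a sketch (hence the different name); the
// pointers would have to refer to device memory for it to be usable.
__global__ void ForwardElimKernelSketch(double d_A[3][4], double d_piv[3], int nDim)
{
    int row = threadIdx.y;   // 0 .. nDim-1, one thread row per matrix row
    int col = threadIdx.x;   // 0 .. nDim, includes the augmented column

    for (int k = 0; k < nDim - 1; k++) {
        // One thread per row computes the elimination multiplier for pivot column k.
        if (col == 0 && row > k)
            d_piv[row] = d_A[row][k] / d_A[k][k];
        __syncthreads();

        // All rows below the pivot row update their elements in parallel.
        if (row > k && col <= nDim)
            d_A[row][col] -= d_piv[row] * d_A[k][col];
        __syncthreads();
    }
}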
3eeb68ab1d24a5345bc33fe020468c690fa57be9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <iostream> #include<cuda.h> #include "cuPrintf_hip.cuh" #include "cuPrintf.hip" using namespace std; using namespace cv; int noofclick=0; int startx,starty,endx,endy; int startx1,starty1; float alpha=100,beta=0; void CallBackFunc_1(int event, int x, int y, int flags, void* userdata) { if ( event == EVENT_LBUTTONDOWN ) { noofclick++; cout << "Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl; if(noofclick==2) { endx=x; endy=y; cvDestroyWindow("Image 1"); } else { startx=x; starty=y; } } } void CallBackFunc_2(int event, int x, int y, int flags, void* userdata) { if ( event == EVENT_LBUTTONDOWN ) { cout << "1Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl; startx1=x; starty1=y; cvDestroyWindow("Image 2"); } } __global__ void pyrup_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2)); if(yIndex >=width || xIndex>=height) { // printf("return %d %d\n",xIndex,yIndex); return; } // printf("a=%d c=%d\n",aabhas,colorWidthStep); // printf("%d %d %d a=%d c=%d\n",xIndex,yIndex,d_in[color_tid1],aabhas,colorWidthStep); //cout<<xIndex<<" "<<yIndex<<endl; if(yIndex%2==0 &&xIndex%2==0) { d_out[color_tid]=d_in[color_tid1]; d_out[color_tid+1]=d_in[color_tid1+1]; d_out[color_tid+2]=d_in[color_tid1+2]; } else { d_out[color_tid]=0; d_out[color_tid+1]=0;//d_in[color_tid1+1]; d_out[color_tid+2]=0;//d_in[color_tid1+2]; } } // printf("%d %d %d\n",xIndex,yIndex,d_out[color_tid]); //int no=1; //gaussian blur TODO /* float blur[5][5] ={ 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0002 ,0.0837 , 0.6187 ,0.0837 ,0.0002, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000 };*/ //printf("Aabhas\n"); // __syncthreads(); //printf("Tu\n"); __global__ void blur_image(unsigned char *d_in,unsigned char *d_out,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); float blur[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j; float output1,output2,output3; int loc; output1=0.0; output2=0.0; output3=0.0; //191 228 for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { if(xIndex+i<height && yIndex+j<width) { if( (xIndex+i)>=0 && (yIndex)+j >=0) { loc= ( (xIndex)+i )*aabhas + (3*( (yIndex)+j)); // output1+=blur[i+2][j+2]*(unsigned char)(d_in[loc]); // output2+=blur[i+2][j+2]*(unsigned char)(d_in[loc+1]); // output3+=blur[i+2][j+2]*(unsigned char)(d_in[loc+2]); output1= output1+blur[i+2][j+2]*(float)(d_in[loc]); output2=output2+blur[i+2][j+2]*(float)(d_in[loc+1]); output3=output3+blur[i+2][j+2]*(float)(d_in[loc+2]); // if(xIndex==191 && yIndex==228) // printf("ap=%d %d %d %d %d\n",d_in[loc],i,j,loc,color_tid); } } //old blure /* if( (xIndex/2 )+i<height/2 && (yIndex/2)+j <width/2) if( (xIndex/2+i)>=0 && 
(yIndex/2)+j >=0) { //const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); loc= ( (xIndex/2)+i )*colorWidthStep + (3*( (yIndex/2)+j)); output1+=blur[i+2][j+2]*d_in[loc]; output2+=blur[i+2][j+2]*d_in[loc+1]; output3+=blur[i+2][j+2]*d_in[loc+2]; }*/ } } d_out[color_tid]=static_cast<unsigned char>(4*output1); d_out[color_tid+1]=static_cast<unsigned char>(4*output2); d_out[color_tid+2]=static_cast<unsigned char>(4*output3); // if(int(4*output1)-d_in[color_tid]<-50 && output1<10 ) // printf("%d %d %f %d %d\n",xIndex,yIndex,4*output1,d_in[color_tid],int(4*output1)-d_in[color_tid]); // d_out[color_tid]=d_in[color_tid1]; // d_out[color_tid+1]=d_in[color_tid1+1]; // d_out[color_tid+2]=d_in[color_tid1+2]; } __global__ void GAUSSGPU(unsigned char*Input,unsigned char*Output,int rows,int cols,int Instep,int Outstep) { int x=blockIdx.x*blockDim.x+threadIdx.x; int y=blockIdx.y*blockDim.y+threadIdx.y; if(x>rows||y>cols) return; /* float Gauss[5][5]={ 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0219 , 0.0983 , 0.1621 , 0.0983 , 0.0219, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, };*/ float Gauss[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j,x1,y1; int In=x*Instep+3*y; int Out=x*Outstep+3*y; float r=0,g=0,b=0; for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { x1=x+i; y1=y+j; if(x1>=0&&y1>=0) { if(x1<rows&&y1<cols) { In=x1*Instep+3*y1; b=b+float(Input[In])*Gauss[i+2][j+2]; g=g+float(Input[In+1])*Gauss[i+2][j+2]; r=r+float(Input[In+2])*Gauss[i+2][j+2]; } } } } Output[Out] = 4*static_cast<unsigned char>(b); Output[Out+1] = 4*static_cast<unsigned char>(g); Output[Out+2] = 4*static_cast<unsigned char>(r); } __global__ void GAUSSGPU1(unsigned char*Input,unsigned char*Output,int rows,int cols,int Instep,int Outstep) { int x=blockIdx.x*blockDim.x+threadIdx.x; int y=blockIdx.y*blockDim.y+threadIdx.y; if(x>rows||y>cols) return; /* float Gauss[5][5]={ 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0219 , 0.0983 , 0.1621 , 0.0983 , 0.0219, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, };*/ float Gauss[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j,x1,y1; int In=x*Instep+3*y; int Out=x*Outstep+3*y; float r=0,g=0,b=0; for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { x1=x+i; y1=y+j; if(x1>=0&&y1>=0) { if(x1<rows&&y1<cols) { In=x1*Instep+3*y1; b=b+float(Input[In])*Gauss[i+2][j+2]; g=g+float(Input[In+1])*Gauss[i+2][j+2]; r=r+float(Input[In+2])*Gauss[i+2][j+2]; } } } } Output[Out] = static_cast<unsigned char>(b); Output[Out+1] = static_cast<unsigned char>(g); Output[Out+2] = static_cast<unsigned char>(r); } void pyrup(Mat &input,Mat& output_1) { int row=input.rows; int col=input.cols; int newrow=row*2; int newcol=col*2; // cout<<newrow<<" "<<newcol<<endl; const int insize=input.step*row; Mat output(newrow,newcol,CV_8UC3); unsigned char *d_input,*d_output,*d_output1;// *d_output; hipMalloc<unsigned char>(&d_input,insize); hipMalloc<unsigned char>(&d_output,output.step*output.rows); hipMalloc<unsigned char>(&d_output1,output.step*output.rows); hipMemcpy(d_input,input.ptr(),insize,hipMemcpyHostToDevice); const dim3 
block(16,16); const dim3 grid( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); hipLaunchKernelGGL(( pyrup_kernel), dim3(grid),dim3(block), 0, 0, d_input,d_output,input.step,output.step,newrow,newcol); hipDeviceSynchronize(); // blur_image<<<grid,block>>>(d_output,d_output1,output.step,newrow,newcol); hipLaunchKernelGGL(( GAUSSGPU), dim3(grid),dim3(block), 0, 0, d_output,d_output1,output.rows,output.cols,output.step,output.step); hipDeviceSynchronize(); // cout<<"\n\n\n\n\nIMAGE FINISHED\n\n\n\n\n"; hipMemcpy(output.ptr(),d_output1,output.step*output.rows,hipMemcpyDeviceToHost); output_1=output; } __global__ void submat_kernel(unsigned char *d_in1,unsigned char *d_in2,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if(yIndex >=width || xIndex>=height) { return; } const int color_tid2 = (xIndex)* aabhas + (3 * (yIndex)); const int color_tid1= (xIndex)* colorWidthStep + (3 * (yIndex)); //printf("%d %d %d %d\n",xIndex,yIndex,d_in1[color_tid1],d_in2[color_tid2]); int s=1; d_in2[color_tid2]= s*(d_in1[color_tid1]-d_in2[color_tid2]); d_in2[color_tid2+1]=s*(d_in1[color_tid1+1]-d_in2[color_tid2+1]); d_in2[color_tid2+2]=s*(d_in1[color_tid1+2]-d_in2[color_tid2+2]); } void submat(Mat &input1,Mat& input2,Mat& output) { int row=min(input1.rows,input2.rows); int col=min(input1.cols,input2.cols); Mat out(row,col,CV_8UC3); unsigned char *d_input1,*d_input2,*d_output; const int insize1=input1.step*input1.rows; const int insize2=input2.step*input2.rows; // cout<<"aabhas="<<insize1<<" "<<insize2; // cout<<"aabhas1="<<input1.step<<" "<<input2.step; hipMalloc<unsigned char>(&d_input1,insize1); hipMalloc<unsigned char>(&d_input2,insize2); hipMalloc<unsigned char>(&d_output,out.step*out.rows); hipMemcpy(d_input1,input1.ptr(),insize1,hipMemcpyHostToDevice); hipMemcpy(d_input2,input2.ptr(),insize2,hipMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (row+block.x)/block.x , (col+block.y)/block.y); hipLaunchKernelGGL(( submat_kernel), dim3(grid),dim3(block), 0, 0, d_input1,d_input2,input1.step,input2.step,row,col); hipDeviceSynchronize(); hipMemcpy(out.ptr(),d_input2,out.step*out.rows,hipMemcpyDeviceToHost); output=out; } __global__ void add2mat_kernel(unsigned char *d_in1,unsigned char *d_in2,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if(yIndex >=width || xIndex>=height) { return; } const int color_tid2 = (xIndex)* aabhas + (3 * (yIndex)); //const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2)); const int color_tid1= (xIndex)* colorWidthStep + (3 * (yIndex)); //printf("%d %d %d %d\n",xIndex,yIndex,d_in1[color_tid1],d_in2[color_tid2]); if(d_in1[color_tid1]+d_in2[color_tid2]>255) { // d_in2[color_tid2]=255; // printf("YES %d\n ",d_in1[color_tid1]+d_in2[color_tid2]); } // else d_in2[color_tid2]=(d_in1[color_tid1]+d_in2[color_tid2]); d_in2[color_tid2+1]=(d_in1[color_tid1+1]+d_in2[color_tid2+1]); d_in2[color_tid2+2]=(d_in1[color_tid1+2]+d_in2[color_tid2+2]); } void add2mat(Mat &input1,Mat& input2,Mat& output) { pyrup(input1,input1); int row=max(input1.rows,input2.rows); int col=max(input1.cols,input2.cols); Mat out(row,col,CV_8UC3); unsigned char *d_input1,*d_input2,*d_output; const int insize1=input1.step*input1.rows; const int insize2=input2.step*input2.rows; // cout<<"aabhas="<<insize1<<" "<<insize2; // cout<<"aabhas1="<<input1.step<<" 
"<<input2.step; hipMalloc<unsigned char>(&d_input1,insize1); hipMalloc<unsigned char>(&d_input2,insize2); hipMalloc<unsigned char>(&d_output,out.step*out.rows); hipMemcpy(d_input1,input1.ptr(),insize1,hipMemcpyHostToDevice); hipMemcpy(d_input2,input2.ptr(),insize2,hipMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (row+block.x)/block.x , (col+block.y)/block.y); hipLaunchKernelGGL(( add2mat_kernel), dim3(grid),dim3(block), 0, 0, d_input1,d_input2,input1.step,input2.step,row,col); hipDeviceSynchronize(); hipMemcpy(out.ptr(),d_input2,out.step*out.rows,hipMemcpyDeviceToHost); output=out; } __global__ void pyrdown_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); if(yIndex >=width || xIndex>=height) { // printf("return %d %d\n",xIndex,yIndex); return; } // printf("a=%d c=%d\n",aabhas,colorWidthStep); // printf("%d %d %d a=%d c=%d\n",xIndex,yIndex,d_in[color_tid1],aabhas,colorWidthStep); //cout<<xIndex<<" "<<yIndex<<endl; d_out[color_tid]=d_in[color_tid1]; d_out[color_tid+1]=d_in[color_tid1+1]; d_out[color_tid+2]=d_in[color_tid1+2]; //gaussian blur TODO /* 0.0000 0.0000 0.0002 0.0000 0.0000 0.0000 0.0113 0.0837 0.0113 0.0000 0.0002 0.0837 0.6187 0.0837 0.0002 0.0000 0.0113 0.0837 0.0113 0.0000 0.0000 0.0000 0.0002 0.0000 0.0000 */ /* float blur[5][5] ={ 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0002 ,0.0837 , 0.6187 ,0.0837 ,0.0002, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000 }; */ /* float blur[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j; float output1,output2,output3; int loc; output1=0.0; output2=0.0; output3=0.0; for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { if(2*xIndex+i<2*height && 2*yIndex+j <2*width) if(2*xIndex+i>=0 && 2*yIndex+j >=0) { //const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); loc= (2*xIndex+i )*colorWidthStep + (3*(2*yIndex+j)); output1+=blur[i+2][j+2]*d_in[loc]; output2+=blur[i+2][j+2]*d_in[loc+1]; output3+=blur[i+2][j+2]*d_in[loc+2]; } } } d_out[color_tid]=output1; d_out[color_tid+1]=output2; d_out[color_tid+2]=output3; // printf("%f %d %d\n",output1,d_in[color_tid1],int(output1)-d_in[color_tid1]); // d_out[color_tid]=d_in[color_tid1]; // d_out[color_tid+1]=d_in[color_tid1+1]; // d_out[color_tid+2]=d_in[color_tid1+2]; */ } void pyrdown(Mat &input,Mat& output_1,Mat &output_2,Mat& output_3,int mask=0) { int row=input.rows; int col=input.cols; int newrow=row/2; int newcol=col/2; const int insize=input.step*row; Mat output(newrow,newcol,CV_8UC3); unsigned char *d_input,*d_output,*d_output1,*d_output2,*d_temp;// *d_output; hipMalloc<unsigned char>(&d_input,insize); hipMalloc<unsigned char>(&d_temp,insize); //cout<<" insize"<<insize<<" d="<<newrow*newcol*sizeof(unsigned char)<<endl; hipMalloc<unsigned char>(&d_output,output.step*output.rows); hipMemcpy(d_input,input.ptr(),insize,hipMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_1((input.rows+block.x)/block.x , (input.cols+block.y)/block.y ); hipLaunchKernelGGL(( GAUSSGPU1), 
dim3(grid_1),dim3(block), 0, 0, d_input,d_temp,input.rows,input.cols,input.step,input.step); hipDeviceSynchronize(); // Mat outputa1(input.rows,input.cols,CV_8UC3); // hipMemcpy(outputa1.ptr(),d_temp,input.step*input.rows,hipMemcpyDeviceToHost); // namedWindow("aabhas"); // imshow("aabhas",outputa1); // waitKey(0); hipLaunchKernelGGL(( pyrdown_kernel), dim3(grid),dim3(block), 0, 0, d_temp,d_output,input.step,output.step,newrow,newcol); hipDeviceSynchronize(); hipMemcpy(output.ptr(),d_output,output.step*output.rows,hipMemcpyDeviceToHost); hipFree(d_temp); output_1=output; row=output.rows; col=output.cols; newrow=row/2; newcol=col/2; cv::Mat output1(newrow,newcol,CV_8UC3); const int insize1=output.step*row; hipMalloc<unsigned char>(&d_temp,output.step*output.rows); hipMalloc<unsigned char>(&d_output1,output.step*output.rows/4); const dim3 block1(16,16); const dim3 grid1( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_2( (row+block.x)/block.x , (col+block.y)/block.y ); hipLaunchKernelGGL(( GAUSSGPU1), dim3(grid_2),dim3(block), 0, 0, d_output,d_temp,output.rows,output.cols,output.step,output.step); hipDeviceSynchronize(); hipLaunchKernelGGL(( pyrdown_kernel), dim3(grid),dim3(block), 0, 0, d_temp,d_output1,output.step,output1.step,newrow,newcol); hipDeviceSynchronize(); hipMemcpy(output1.ptr(),d_output1,output1.step*output1.rows,hipMemcpyDeviceToHost); hipFree(d_temp); output_2=output1; row=output1.rows; col=output1.cols; newrow=row/2; newcol=col/2; cv::Mat output2(newrow,newcol,CV_8UC3); hipMalloc<unsigned char>(&d_temp,output1.step*output1.rows); const dim3 grid_3( (row+block.x)/block.x , (col+block.y)/block.y ); hipLaunchKernelGGL(( GAUSSGPU1), dim3(grid_3),dim3(block), 0, 0, d_output1,d_temp,output1.rows,output1.cols,output1.step,output1.step); hipMalloc<unsigned char>(&d_output2,output1.step*output1.rows/4); const dim3 block2(16,16); const dim3 grid2( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); hipLaunchKernelGGL(( pyrdown_kernel), dim3(grid),dim3(block), 0, 0, d_temp,d_output2,output1.step,output2.step,newrow,newcol); hipDeviceSynchronize(); hipMemcpy(output2.ptr(),d_output2,output2.step*output2.rows,hipMemcpyDeviceToHost); output_3=output2; } void pyrdownmask(Mat &input,Mat& output_1,Mat &output_2,Mat& output_3) { int row=input.rows; int col=input.cols; int newrow=row/2; int newcol=col/2; const int insize=input.step*row; Mat output(newrow,newcol,CV_8UC3); unsigned char *d_input,*d_output,*d_output1,*d_output2;// *d_output; hipMalloc<unsigned char>(&d_input,insize); //cout<<" insize"<<insize<<" d="<<newrow*newcol*sizeof(unsigned char)<<endl; hipMalloc<unsigned char>(&d_output,output.step*output.rows); hipMemcpy(d_input,input.ptr(),insize,hipMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_1((input.rows+block.x)/block.x , (input.cols+block.y)/block.y ); // Mat outputa1(input.rows,input.cols,CV_8UC3); // hipMemcpy(outputa1.ptr(),d_temp,input.step*input.rows,hipMemcpyDeviceToHost); // namedWindow("aabhas"); // imshow("aabhas",outputa1); // waitKey(0); hipLaunchKernelGGL(( pyrdown_kernel), dim3(grid),dim3(block), 0, 0, d_input,d_output,input.step,output.step,newrow,newcol); hipDeviceSynchronize(); hipMemcpy(output.ptr(),d_output,output.step*output.rows,hipMemcpyDeviceToHost); output_1=output; row=output.rows; col=output.cols; newrow=row/2; newcol=col/2; cv::Mat output1(newrow,newcol,CV_8UC3); const int insize1=output.step*row; hipMalloc<unsigned 
char>(&d_output1,output.step*output.rows/4); const dim3 block1(16,16); const dim3 grid1( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_2( (row+block.x)/block.x , (col+block.y)/block.y ); hipLaunchKernelGGL(( pyrdown_kernel), dim3(grid),dim3(block), 0, 0, d_output,d_output1,output.step,output1.step,newrow,newcol); hipDeviceSynchronize(); hipMemcpy(output1.ptr(),d_output1,output1.step*output1.rows,hipMemcpyDeviceToHost); output_2=output1; row=output1.rows; col=output1.cols; newrow=row/2; newcol=col/2; cv::Mat output2(newrow,newcol,CV_8UC3); const dim3 grid_3( (row+block.x)/block.x , (col+block.y)/block.y ); hipMalloc<unsigned char>(&d_output2,output1.step*output1.rows/4); const dim3 block2(16,16); const dim3 grid2( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); hipLaunchKernelGGL(( pyrdown_kernel), dim3(grid),dim3(block), 0, 0, d_output1,d_output2,output1.step,output2.step,newrow,newcol); hipDeviceSynchronize(); hipMemcpy(output2.ptr(),d_output2,output2.step*output2.rows,hipMemcpyDeviceToHost); output_3=output2; } __global__ void imageblend_kernel(unsigned char *d_input1,unsigned char *d_input2,int width,int height,int colorWidthStep,int aabhas,unsigned char *mask,int maskstep) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if((xIndex>=width) || (yIndex>=height)) return; const int color_tid1 = (yIndex)* aabhas + (3 * (xIndex)); const int color_tid2 = (yIndex)* colorWidthStep + (3 * (xIndex)); const int color_mask=(yIndex)*maskstep+(3*(xIndex)); float m= ( unsigned char)mask[color_mask]; if(mask[color_mask]!=255 && mask[color_mask]!=0) printf("c=%d %d %d %d\n",( unsigned char)mask[color_mask],xIndex,yIndex,height); float m1=m/255.0; float m2=1-m1; // printf("%d %d %d\n",mask[color_mask],mask[color_mask+1],mask[color_mask+2]); int x=d_input2[color_tid2]; d_input2[color_tid2]=static_cast<unsigned char> ((m1)* d_input2[color_tid2] +(m2)*d_input1[color_tid1]); // if(m1<=0.58 && m1>=0.22) // printf("%f %f %d %d %d\n %d %d %d\n",m1,m2,xIndex,yIndex, mask[color_mask],d_input2[color_tid2],x,d_input1[color_tid1]); m= ( unsigned char)mask[color_mask+1]; m1=m/255.0; m2=1-m1; // printf("2%f %f %d %d \n",m1,m2,xIndex,yIndex); d_input2[color_tid2+1]=static_cast<unsigned char>((m1)* d_input2[color_tid2+1] +(m2)*d_input1[color_tid1+1]); m= ( unsigned char)mask[color_mask+2]; m1=m/255.0; m2=1-m1; // printf("3%f %f %d %d \n",m1,m2,xIndex,yIndex); d_input2[color_tid2+2]=static_cast<unsigned char> ((m1)* d_input2[color_tid2+2] +(m2)*d_input1[color_tid1+2]); // d_input2[color_tid2+1]=(beta/100.0)* d_input2[color_tid2+1] +(alpha/100.0)*d_input1[color_tid1+1]; // d_input2[color_tid2+2]=(beta/100.0)* d_input2[color_tid2+2] +(alpha/100.0)*d_input1[color_tid1+2] ; // printf("%f\n",m); // d_input2[color_tid]= d_input // float alpha=100,beta=0; //if((startx1+xIndex<width) && (starty1+yIndex<height)) { // if((startx+xIndex<=endx) && (starty+yIndex<=endy)) { //const int color_tid1 = (yIndex +starty)* aabhas + (3 * (xIndex+startx)); // const int color_tid2 = (yIndex +starty1)* colorWidthStep + (3 * (xIndex+startx1)); //int a=d_input2[color_tid2]; // d_input2[color_tid2]=(beta/100.0)* d_input2[color_tid2] +(alpha/100.0)*d_input1[color_tid1]; // d_input2[color_tid2+1]=(beta/100.0)* d_input2[color_tid2+1] +(alpha/100.0)*d_input1[color_tid1+1]; // d_input2[color_tid2+2]=(beta/100.0)* d_input2[color_tid2+2] +(alpha/100.0)*d_input1[color_tid1+2] ; } } } void blendimage(Mat& input1 , Mat& input2 ,Mat& output1,int scale,Mat & 
mask) { const int insize1=input1.step * input1.rows; const int insize2=input2.step * input2.rows; const int masksize=mask.step * mask.rows; unsigned char *d_input1,*d_input2,*d_mask;// *d_output; int x,y; Mat img=mask; for(x=0;x<img.cols;x++) for(y=0;y<img.rows;y++) if(img.at<cv::Vec3b>(y,x)[0]!=0 &&img.at<cv::Vec3b>(y,x)[0]!=255) { cout<<"\nmask fail\n"<<endl; } // cout<<insize1<<" "<<insize2<<" --- "<<mask.step*mask.rows<<endl; // cout<<mask.cols<<" m "<<mask.rows<<" "<<mask.step<<endl; // cout<<input1.cols<<" 1 "<<input1.rows<<" "<<input1.step<<endl; // cout<<input2.cols<<" 2 "<<input2.rows<<" "<<input2.step<<endl; hipMalloc<unsigned char>(&d_input1,insize1); hipMalloc<unsigned char>(&d_input2,insize2); hipMalloc<unsigned char>(&d_mask,masksize); hipMemcpy(d_input1,input1.ptr(),insize1,hipMemcpyHostToDevice); hipMemcpy(d_input2,input2.ptr(),insize2,hipMemcpyHostToDevice); hipMemcpy(d_mask,mask.ptr(),masksize,hipMemcpyHostToDevice); const dim3 block(16,16); Mat output(input2.rows,input2.cols,CV_8UC3); /* startx=startx/scale; starty=starty/scale; startx1=startx1/scale; starty1=starty1/scale; endx=endx/scale; endy=endy/scale;*/ const dim3 grid((input2.cols + block.x )/block.x, (input2.rows + block.y )/block.y); hipLaunchKernelGGL(( imageblend_kernel), dim3(grid),dim3(block), 0, 0, d_input1,d_input2,input2.cols,input2.rows,input2.step,input1.step,d_mask,mask.step); hipDeviceSynchronize(); hipMemcpy(output.ptr(),d_input2,insize2,hipMemcpyDeviceToHost); startx=startx*scale; starty=starty*scale; startx1=startx1*scale; starty1=starty1*scale; endx=endx*scale; endy=endy*scale; output1=output; } void display(Mat &img) { namedWindow("debug",1); imshow("debug",img); waitKey(0); } int main(int argc, char** argv) { int debug=0; // Read image from file // cout<<"Enter the vale of alpha and beta\n"; // cin>>alpha>>beta; Mat img1 = imread("dataset/pepper.jpg"); Mat mask_1= imread("dataset/mask.jpg"); //if fail to read the image if ( img1.empty() ) { cout << "Error loading the image 1" << endl; return -1; } //Create a window namedWindow("Mask", 1); imshow("Mask",mask_1); waitKey(0); namedWindow("Image 1", 1); //set the callback function for any mouse event setMouseCallback("Image 1", CallBackFunc_1, NULL); //show the image imshow("Image 1", img1); // Wait until user press some key waitKey(0); cout<<"position of first\n"<<startx<<" "<<starty<<" "<<endx<<" "<<endy<<endl; Mat img2=imread("dataset/snow.jpg"); if ( img2.empty() ) { cout << "Error loading the image 2" << endl; return -1; } namedWindow("Image 2",CV_WINDOW_AUTOSIZE); setMouseCallback("Image 2", CallBackFunc_2, NULL); //show the image imshow("Image 2", img2); waitKey(0); cout<<"position of second\n"<<startx1<<" "<<starty1<<endl; struct timespec t1, t2; clock_gettime(CLOCK_MONOTONIC, &t1); int newrow=img1.rows/2; int newcol=img1.cols/2; //cv::Mat output(img1);//(newrow,newcol,CV_8UC3);; cv::Mat output(newrow,newcol,CV_8UC3);; Mat output_1,output_2,output_3; // imshow("output",img1); // waitKey(0); cv::Mat finaloutput; Mat mask_2,mask_3,mask_4; // pyrdown(mask_1,mask_2,mask_3,mask_4); pyrdownmask(mask_1,mask_2,mask_3,mask_4); // display(mask_3); // cout<<"mat=\n"<<mask_1<<endl; pyrdown(img1,output_1,output_2,output_3); // cv::pyrDown(img1,output); // blendimage(img1,img2,output); if(debug==1) { namedWindow("output_0",1); imshow("output_0",img1); waitKey(0); namedWindow("output_1",1); imshow("output_1",output_1); waitKey(0); namedWindow("output_2",1); imshow("output_2",output_2); waitKey(0); namedWindow("output_3",1); imshow("output_3",output_3); 
waitKey(0); } // cv::Mat output1(newrow*2,newcol*2,CV_8UC3); Mat pyoutput_1,pyoutput_2,pyoutput_3; pyrup(output_1,pyoutput_1); pyrup(output_2,pyoutput_2); pyrup(output_3,pyoutput_3); if(debug==1) { namedWindow("showall",1); imshow("showall",pyoutput_1); waitKey(0); imshow("showall",pyoutput_2); waitKey(0); imshow("showall",pyoutput_3); waitKey(0); } //cout<<"Mat="<<pyoutput_3<<endl; // namedWindow("output",1); // imshow("output",pyoutput_1-img1); // waitKey(0); if(1==1) { Mat LA2,LA1,LA0; Mat LA3=output_3; submat(img1,pyoutput_1,LA0); submat(output_1,pyoutput_2,LA1); submat(output_2,pyoutput_3,LA2); // cout<<LA0<<endl; // display(LA0); if(debug==1) { namedWindow("submat1",1); imshow("submat1",LA0); waitKey(0); namedWindow("submat2",1); imshow("submat2",LA1); waitKey(0); namedWindow("submat3",1); imshow("submat3",LA2); waitKey(0); namedWindow("submat4",1); imshow("submat4",LA3); waitKey(0); } Mat output1_1,output1_2,output1_3; pyrdown(img2,output1_1,output1_2,output1_3); // namedWindow("output_0",1); if(debug==1) { imshow("output_0",img2); waitKey(0); // namedWindow("output_1",1); imshow("output_1",output1_1); waitKey(0); // namedWindow("output_2",1); imshow("output_2",output1_2); waitKey(0); // namedWindow("output_3",1); imshow("output_3",output1_3); waitKey(0); } Mat pyoutput1_1,pyoutput1_2,pyoutput1_3; pyrup(output1_1,pyoutput1_1); pyrup(output1_2,pyoutput1_2); pyrup(output1_3,pyoutput1_3); Mat LB2,LB1,LB0; Mat LB3=output1_3; submat(img2,pyoutput1_1,LB0); submat(output1_1,pyoutput1_2,LB1); submat(output1_2,pyoutput1_3,LB2); Mat LS3,LS2,LS1,LS0; // namedWindow("submat1",1); if(debug==1) { imshow("submat1",LB0); waitKey(0); // namedWindow("submat2",1); imshow("submat2",LB1); waitKey(0); // namedWindow("submat3",1); imshow("submat3",LB2); waitKey(0); // namedWindow("submat4",1); imshow("submat4",LB3); waitKey(0); // cout<<LA0.rows<<" "<<LA0.cols<<endl; // cout<<LB0.rows<<" "<<LB0.cols<<endl; } int gauss=0; if(gauss==1) GaussianBlur(mask_1,mask_1,Size( 7, 7), 0, 0); blendimage(LA0,LB0,LS0,1,mask_1); // imwrite("debug/mask_1.jpg",mask_1); // display(LS0); if(debug==1) { namedWindow("LS0",1); imshow("LS0",LS0); waitKey(0); } if(gauss==1) GaussianBlur(mask_2,mask_2,Size( 7, 7), 0, 0); blendimage(LA1,LB1,LS1,2,mask_2); // cout<<mask_2<<endl; // imwrite("debug/mask_2.jpg",mask_2); if(debug==1) { namedWindow("LS1",1); imshow("LS1",LS1); waitKey(0); } if(gauss==1) GaussianBlur(mask_3,mask_3,Size( 7, 7), 0, 0); blendimage(LA2,LB2,LS2,4,mask_3); // imwrite("debug/mask_3.jpg",mask_3); if(debug==1) { namedWindow("LS2",1); imshow("LS2",LS2); waitKey(0); } if(gauss==1) GaussianBlur(mask_4,mask_4,Size( 7, 7), 0, 0); GaussianBlur(mask_4,mask_4,Size( 7, 7), 0, 0); blendimage(LA3,LB3,LS3,8,mask_4); // cout<<mask_4; // imwrite("debug/mask_4.jpg",mask_4); if(debug==1) { namedWindow("LS3",1); imshow("LS3",LS3); waitKey(0); } Mat final0,final1,final2,final3; add2mat(LS3,LS2,final3); if(debug==1) { namedWindow("final3",1); imshow("final3",final3); waitKey(0); } add2mat(final3,LS1,final2); if(debug==1) { namedWindow("final2",1); imshow("final2",final2); waitKey(0); } clock_gettime(CLOCK_MONOTONIC, &t2); float time = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); printf("Time (in milliseconds): %f\n", time); add2mat(final2,LS0,final1); namedWindow("final1",1); imshow("final1",final1); waitKey(0); imwrite("final1.jpg",final1); // Mat yo; // medianBlur(final1, yo, 3 ); // GaussianBlur(final1,yo,Size( 7, 7), 0, 0); // display(yo); } /* add2mat(LS0,final1,final0); 
namedWindow("final0",1); imshow("final0",final0); waitKey(0);*/ /* pyrup(output_2,pyoutput_2); imshow("output",pyoutput_2-output_1); waitKey(0); pyrup(output_3,pyoutput_3); imshow("output",pyoutput_3-output_2); waitKey(0);*/ // pyrdown(img1,output); // cout<<img1.rows<<" "<<img1.cols<<" "<<output.rows<<" "<<output.cols<<endl; return 0; }
3eeb68ab1d24a5345bc33fe020468c690fa57be9.cu
#include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include <iostream> #include<cuda.h> #include "cuPrintf.cuh" #include "cuPrintf.cu" using namespace std; using namespace cv; int noofclick=0; int startx,starty,endx,endy; int startx1,starty1; float alpha=100,beta=0; void CallBackFunc_1(int event, int x, int y, int flags, void* userdata) { if ( event == EVENT_LBUTTONDOWN ) { noofclick++; cout << "Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl; if(noofclick==2) { endx=x; endy=y; cvDestroyWindow("Image 1"); } else { startx=x; starty=y; } } } void CallBackFunc_2(int event, int x, int y, int flags, void* userdata) { if ( event == EVENT_LBUTTONDOWN ) { cout << "1Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl; startx1=x; starty1=y; cvDestroyWindow("Image 2"); } } __global__ void pyrup_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2)); if(yIndex >=width || xIndex>=height) { // printf("return %d %d\n",xIndex,yIndex); return; } // printf("a=%d c=%d\n",aabhas,colorWidthStep); // printf("%d %d %d a=%d c=%d\n",xIndex,yIndex,d_in[color_tid1],aabhas,colorWidthStep); //cout<<xIndex<<" "<<yIndex<<endl; if(yIndex%2==0 &&xIndex%2==0) { d_out[color_tid]=d_in[color_tid1]; d_out[color_tid+1]=d_in[color_tid1+1]; d_out[color_tid+2]=d_in[color_tid1+2]; } else { d_out[color_tid]=0; d_out[color_tid+1]=0;//d_in[color_tid1+1]; d_out[color_tid+2]=0;//d_in[color_tid1+2]; } } // printf("%d %d %d\n",xIndex,yIndex,d_out[color_tid]); //int no=1; //gaussian blur TODO /* float blur[5][5] ={ 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0002 ,0.0837 , 0.6187 ,0.0837 ,0.0002, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000 };*/ //printf("Aabhas\n"); // __syncthreads(); //printf("Tu\n"); __global__ void blur_image(unsigned char *d_in,unsigned char *d_out,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); float blur[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j; float output1,output2,output3; int loc; output1=0.0; output2=0.0; output3=0.0; //191 228 for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { if(xIndex+i<height && yIndex+j<width) { if( (xIndex+i)>=0 && (yIndex)+j >=0) { loc= ( (xIndex)+i )*aabhas + (3*( (yIndex)+j)); // output1+=blur[i+2][j+2]*(unsigned char)(d_in[loc]); // output2+=blur[i+2][j+2]*(unsigned char)(d_in[loc+1]); // output3+=blur[i+2][j+2]*(unsigned char)(d_in[loc+2]); output1= output1+blur[i+2][j+2]*(float)(d_in[loc]); output2=output2+blur[i+2][j+2]*(float)(d_in[loc+1]); output3=output3+blur[i+2][j+2]*(float)(d_in[loc+2]); // if(xIndex==191 && yIndex==228) // printf("ap=%d %d %d %d %d\n",d_in[loc],i,j,loc,color_tid); } } //old blure /* if( (xIndex/2 )+i<height/2 && (yIndex/2)+j <width/2) if( (xIndex/2+i)>=0 && (yIndex/2)+j >=0) { //const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); 
loc= ( (xIndex/2)+i )*colorWidthStep + (3*( (yIndex/2)+j)); output1+=blur[i+2][j+2]*d_in[loc]; output2+=blur[i+2][j+2]*d_in[loc+1]; output3+=blur[i+2][j+2]*d_in[loc+2]; }*/ } } d_out[color_tid]=static_cast<unsigned char>(4*output1); d_out[color_tid+1]=static_cast<unsigned char>(4*output2); d_out[color_tid+2]=static_cast<unsigned char>(4*output3); // if(int(4*output1)-d_in[color_tid]<-50 && output1<10 ) // printf("%d %d %f %d %d\n",xIndex,yIndex,4*output1,d_in[color_tid],int(4*output1)-d_in[color_tid]); // d_out[color_tid]=d_in[color_tid1]; // d_out[color_tid+1]=d_in[color_tid1+1]; // d_out[color_tid+2]=d_in[color_tid1+2]; } __global__ void GAUSSGPU(unsigned char*Input,unsigned char*Output,int rows,int cols,int Instep,int Outstep) { int x=blockIdx.x*blockDim.x+threadIdx.x; int y=blockIdx.y*blockDim.y+threadIdx.y; if(x>rows||y>cols) return; /* float Gauss[5][5]={ 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0219 , 0.0983 , 0.1621 , 0.0983 , 0.0219, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, };*/ float Gauss[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j,x1,y1; int In=x*Instep+3*y; int Out=x*Outstep+3*y; float r=0,g=0,b=0; for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { x1=x+i; y1=y+j; if(x1>=0&&y1>=0) { if(x1<rows&&y1<cols) { In=x1*Instep+3*y1; b=b+float(Input[In])*Gauss[i+2][j+2]; g=g+float(Input[In+1])*Gauss[i+2][j+2]; r=r+float(Input[In+2])*Gauss[i+2][j+2]; } } } } Output[Out] = 4*static_cast<unsigned char>(b); Output[Out+1] = 4*static_cast<unsigned char>(g); Output[Out+2] = 4*static_cast<unsigned char>(r); } __global__ void GAUSSGPU1(unsigned char*Input,unsigned char*Output,int rows,int cols,int Instep,int Outstep) { int x=blockIdx.x*blockDim.x+threadIdx.x; int y=blockIdx.y*blockDim.y+threadIdx.y; if(x>rows||y>cols) return; /* float Gauss[5][5]={ 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0219 , 0.0983 , 0.1621 , 0.0983 , 0.0219, 0.0133 , 0.0596 , 0.0983 , 0.0596 , 0.0133, 0.0030 , 0.0133 , 0.0219 , 0.0133 , 0.0030, };*/ float Gauss[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j,x1,y1; int In=x*Instep+3*y; int Out=x*Outstep+3*y; float r=0,g=0,b=0; for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { x1=x+i; y1=y+j; if(x1>=0&&y1>=0) { if(x1<rows&&y1<cols) { In=x1*Instep+3*y1; b=b+float(Input[In])*Gauss[i+2][j+2]; g=g+float(Input[In+1])*Gauss[i+2][j+2]; r=r+float(Input[In+2])*Gauss[i+2][j+2]; } } } } Output[Out] = static_cast<unsigned char>(b); Output[Out+1] = static_cast<unsigned char>(g); Output[Out+2] = static_cast<unsigned char>(r); } void pyrup(Mat &input,Mat& output_1) { int row=input.rows; int col=input.cols; int newrow=row*2; int newcol=col*2; // cout<<newrow<<" "<<newcol<<endl; const int insize=input.step*row; Mat output(newrow,newcol,CV_8UC3); unsigned char *d_input,*d_output,*d_output1;// *d_output; cudaMalloc<unsigned char>(&d_input,insize); cudaMalloc<unsigned char>(&d_output,output.step*output.rows); cudaMalloc<unsigned char>(&d_output1,output.step*output.rows); cudaMemcpy(d_input,input.ptr(),insize,cudaMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); 
pyrup_kernel<<<grid,block>>>(d_input,d_output,input.step,output.step,newrow,newcol); cudaDeviceSynchronize(); // blur_image<<<grid,block>>>(d_output,d_output1,output.step,newrow,newcol); GAUSSGPU<<<grid,block>>>(d_output,d_output1,output.rows,output.cols,output.step,output.step); cudaDeviceSynchronize(); // cout<<"\n\n\n\n\nIMAGE FINISHED\n\n\n\n\n"; cudaMemcpy(output.ptr(),d_output1,output.step*output.rows,cudaMemcpyDeviceToHost); output_1=output; } __global__ void submat_kernel(unsigned char *d_in1,unsigned char *d_in2,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if(yIndex >=width || xIndex>=height) { return; } const int color_tid2 = (xIndex)* aabhas + (3 * (yIndex)); const int color_tid1= (xIndex)* colorWidthStep + (3 * (yIndex)); //printf("%d %d %d %d\n",xIndex,yIndex,d_in1[color_tid1],d_in2[color_tid2]); int s=1; d_in2[color_tid2]= s*(d_in1[color_tid1]-d_in2[color_tid2]); d_in2[color_tid2+1]=s*(d_in1[color_tid1+1]-d_in2[color_tid2+1]); d_in2[color_tid2+2]=s*(d_in1[color_tid1+2]-d_in2[color_tid2+2]); } void submat(Mat &input1,Mat& input2,Mat& output) { int row=min(input1.rows,input2.rows); int col=min(input1.cols,input2.cols); Mat out(row,col,CV_8UC3); unsigned char *d_input1,*d_input2,*d_output; const int insize1=input1.step*input1.rows; const int insize2=input2.step*input2.rows; // cout<<"aabhas="<<insize1<<" "<<insize2; // cout<<"aabhas1="<<input1.step<<" "<<input2.step; cudaMalloc<unsigned char>(&d_input1,insize1); cudaMalloc<unsigned char>(&d_input2,insize2); cudaMalloc<unsigned char>(&d_output,out.step*out.rows); cudaMemcpy(d_input1,input1.ptr(),insize1,cudaMemcpyHostToDevice); cudaMemcpy(d_input2,input2.ptr(),insize2,cudaMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (row+block.x)/block.x , (col+block.y)/block.y); submat_kernel<<<grid,block>>>(d_input1,d_input2,input1.step,input2.step,row,col); cudaDeviceSynchronize(); cudaMemcpy(out.ptr(),d_input2,out.step*out.rows,cudaMemcpyDeviceToHost); output=out; } __global__ void add2mat_kernel(unsigned char *d_in1,unsigned char *d_in2,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if(yIndex >=width || xIndex>=height) { return; } const int color_tid2 = (xIndex)* aabhas + (3 * (yIndex)); //const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2)); const int color_tid1= (xIndex)* colorWidthStep + (3 * (yIndex)); //printf("%d %d %d %d\n",xIndex,yIndex,d_in1[color_tid1],d_in2[color_tid2]); if(d_in1[color_tid1]+d_in2[color_tid2]>255) { // d_in2[color_tid2]=255; // printf("YES %d\n ",d_in1[color_tid1]+d_in2[color_tid2]); } // else d_in2[color_tid2]=(d_in1[color_tid1]+d_in2[color_tid2]); d_in2[color_tid2+1]=(d_in1[color_tid1+1]+d_in2[color_tid2+1]); d_in2[color_tid2+2]=(d_in1[color_tid1+2]+d_in2[color_tid2+2]); } void add2mat(Mat &input1,Mat& input2,Mat& output) { pyrup(input1,input1); int row=max(input1.rows,input2.rows); int col=max(input1.cols,input2.cols); Mat out(row,col,CV_8UC3); unsigned char *d_input1,*d_input2,*d_output; const int insize1=input1.step*input1.rows; const int insize2=input2.step*input2.rows; // cout<<"aabhas="<<insize1<<" "<<insize2; // cout<<"aabhas1="<<input1.step<<" "<<input2.step; cudaMalloc<unsigned char>(&d_input1,insize1); cudaMalloc<unsigned char>(&d_input2,insize2); cudaMalloc<unsigned char>(&d_output,out.step*out.rows); 
cudaMemcpy(d_input1,input1.ptr(),insize1,cudaMemcpyHostToDevice); cudaMemcpy(d_input2,input2.ptr(),insize2,cudaMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (row+block.x)/block.x , (col+block.y)/block.y); add2mat_kernel<<<grid,block>>>(d_input1,d_input2,input1.step,input2.step,row,col); cudaDeviceSynchronize(); cudaMemcpy(out.ptr(),d_input2,out.step*out.rows,cudaMemcpyDeviceToHost); output=out; } __global__ void pyrdown_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); if(yIndex >=width || xIndex>=height) { // printf("return %d %d\n",xIndex,yIndex); return; } // printf("a=%d c=%d\n",aabhas,colorWidthStep); // printf("%d %d %d a=%d c=%d\n",xIndex,yIndex,d_in[color_tid1],aabhas,colorWidthStep); //cout<<xIndex<<" "<<yIndex<<endl; d_out[color_tid]=d_in[color_tid1]; d_out[color_tid+1]=d_in[color_tid1+1]; d_out[color_tid+2]=d_in[color_tid1+2]; //gaussian blur TODO /* 0.0000 0.0000 0.0002 0.0000 0.0000 0.0000 0.0113 0.0837 0.0113 0.0000 0.0002 0.0837 0.6187 0.0837 0.0002 0.0000 0.0113 0.0837 0.0113 0.0000 0.0000 0.0000 0.0002 0.0000 0.0000 */ /* float blur[5][5] ={ 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0002 ,0.0837 , 0.6187 ,0.0837 ,0.0002, 0.0000 ,0.0113 , 0.0837 ,0.0113 ,0.0000, 0.0000 ,0.0000 , 0.0002 ,0.0000 ,0.0000 }; */ /* float blur[5][5] ={ {0.0025, 0.0125, 0.02 , 0.0125, 0.0025}, {0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.02 , 0.1 , 0.16 , 0.1 , 0.02 }, { 0.0125, 0.0625, 0.1 , 0.0625, 0.0125}, { 0.0025, 0.0125, 0.02 , 0.0125, 0.0025}}; int i,j; float output1,output2,output3; int loc; output1=0.0; output2=0.0; output3=0.0; for(i=-2;i<=2;i++) { for(j=-2;j<=2;j++) { if(2*xIndex+i<2*height && 2*yIndex+j <2*width) if(2*xIndex+i>=0 && 2*yIndex+j >=0) { //const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); loc= (2*xIndex+i )*colorWidthStep + (3*(2*yIndex+j)); output1+=blur[i+2][j+2]*d_in[loc]; output2+=blur[i+2][j+2]*d_in[loc+1]; output3+=blur[i+2][j+2]*d_in[loc+2]; } } } d_out[color_tid]=output1; d_out[color_tid+1]=output2; d_out[color_tid+2]=output3; // printf("%f %d %d\n",output1,d_in[color_tid1],int(output1)-d_in[color_tid1]); // d_out[color_tid]=d_in[color_tid1]; // d_out[color_tid+1]=d_in[color_tid1+1]; // d_out[color_tid+2]=d_in[color_tid1+2]; */ } void pyrdown(Mat &input,Mat& output_1,Mat &output_2,Mat& output_3,int mask=0) { int row=input.rows; int col=input.cols; int newrow=row/2; int newcol=col/2; const int insize=input.step*row; Mat output(newrow,newcol,CV_8UC3); unsigned char *d_input,*d_output,*d_output1,*d_output2,*d_temp;// *d_output; cudaMalloc<unsigned char>(&d_input,insize); cudaMalloc<unsigned char>(&d_temp,insize); //cout<<" insize"<<insize<<" d="<<newrow*newcol*sizeof(unsigned char)<<endl; cudaMalloc<unsigned char>(&d_output,output.step*output.rows); cudaMemcpy(d_input,input.ptr(),insize,cudaMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_1((input.rows+block.x)/block.x , (input.cols+block.y)/block.y ); GAUSSGPU1<<<grid_1,block>>>(d_input,d_temp,input.rows,input.cols,input.step,input.step); cudaDeviceSynchronize(); // Mat outputa1(input.rows,input.cols,CV_8UC3); // 
cudaMemcpy(outputa1.ptr(),d_temp,input.step*input.rows,cudaMemcpyDeviceToHost); // namedWindow("aabhas"); // imshow("aabhas",outputa1); // waitKey(0); pyrdown_kernel<<<grid,block>>>(d_temp,d_output,input.step,output.step,newrow,newcol); cudaDeviceSynchronize(); cudaMemcpy(output.ptr(),d_output,output.step*output.rows,cudaMemcpyDeviceToHost); cudaFree(d_temp); output_1=output; row=output.rows; col=output.cols; newrow=row/2; newcol=col/2; cv::Mat output1(newrow,newcol,CV_8UC3); const int insize1=output.step*row; cudaMalloc<unsigned char>(&d_temp,output.step*output.rows); cudaMalloc<unsigned char>(&d_output1,output.step*output.rows/4); const dim3 block1(16,16); const dim3 grid1( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_2( (row+block.x)/block.x , (col+block.y)/block.y ); GAUSSGPU1<<<grid_2,block>>>(d_output,d_temp,output.rows,output.cols,output.step,output.step); cudaDeviceSynchronize(); pyrdown_kernel<<<grid,block>>>(d_temp,d_output1,output.step,output1.step,newrow,newcol); cudaDeviceSynchronize(); cudaMemcpy(output1.ptr(),d_output1,output1.step*output1.rows,cudaMemcpyDeviceToHost); cudaFree(d_temp); output_2=output1; row=output1.rows; col=output1.cols; newrow=row/2; newcol=col/2; cv::Mat output2(newrow,newcol,CV_8UC3); cudaMalloc<unsigned char>(&d_temp,output1.step*output1.rows); const dim3 grid_3( (row+block.x)/block.x , (col+block.y)/block.y ); GAUSSGPU1<<<grid_3,block>>>(d_output1,d_temp,output1.rows,output1.cols,output1.step,output1.step); cudaMalloc<unsigned char>(&d_output2,output1.step*output1.rows/4); const dim3 block2(16,16); const dim3 grid2( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); pyrdown_kernel<<<grid,block>>>(d_temp,d_output2,output1.step,output2.step,newrow,newcol); cudaDeviceSynchronize(); cudaMemcpy(output2.ptr(),d_output2,output2.step*output2.rows,cudaMemcpyDeviceToHost); output_3=output2; } void pyrdownmask(Mat &input,Mat& output_1,Mat &output_2,Mat& output_3) { int row=input.rows; int col=input.cols; int newrow=row/2; int newcol=col/2; const int insize=input.step*row; Mat output(newrow,newcol,CV_8UC3); unsigned char *d_input,*d_output,*d_output1,*d_output2;// *d_output; cudaMalloc<unsigned char>(&d_input,insize); //cout<<" insize"<<insize<<" d="<<newrow*newcol*sizeof(unsigned char)<<endl; cudaMalloc<unsigned char>(&d_output,output.step*output.rows); cudaMemcpy(d_input,input.ptr(),insize,cudaMemcpyHostToDevice); const dim3 block(16,16); const dim3 grid( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_1((input.rows+block.x)/block.x , (input.cols+block.y)/block.y ); // Mat outputa1(input.rows,input.cols,CV_8UC3); // cudaMemcpy(outputa1.ptr(),d_temp,input.step*input.rows,cudaMemcpyDeviceToHost); // namedWindow("aabhas"); // imshow("aabhas",outputa1); // waitKey(0); pyrdown_kernel<<<grid,block>>>(d_input,d_output,input.step,output.step,newrow,newcol); cudaDeviceSynchronize(); cudaMemcpy(output.ptr(),d_output,output.step*output.rows,cudaMemcpyDeviceToHost); output_1=output; row=output.rows; col=output.cols; newrow=row/2; newcol=col/2; cv::Mat output1(newrow,newcol,CV_8UC3); const int insize1=output.step*row; cudaMalloc<unsigned char>(&d_output1,output.step*output.rows/4); const dim3 block1(16,16); const dim3 grid1( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); const dim3 grid_2( (row+block.x)/block.x , (col+block.y)/block.y ); pyrdown_kernel<<<grid,block>>>(d_output,d_output1,output.step,output1.step,newrow,newcol); cudaDeviceSynchronize(); 
cudaMemcpy(output1.ptr(),d_output1,output1.step*output1.rows,cudaMemcpyDeviceToHost); output_2=output1; row=output1.rows; col=output1.cols; newrow=row/2; newcol=col/2; cv::Mat output2(newrow,newcol,CV_8UC3); const dim3 grid_3( (row+block.x)/block.x , (col+block.y)/block.y ); cudaMalloc<unsigned char>(&d_output2,output1.step*output1.rows/4); const dim3 block2(16,16); const dim3 grid2( (newrow+block.x)/block.x , (newcol+block.y)/block.y ); pyrdown_kernel<<<grid,block>>>(d_output1,d_output2,output1.step,output2.step,newrow,newcol); cudaDeviceSynchronize(); cudaMemcpy(output2.ptr(),d_output2,output2.step*output2.rows,cudaMemcpyDeviceToHost); output_3=output2; } __global__ void imageblend_kernel(unsigned char *d_input1,unsigned char *d_input2,int width,int height,int colorWidthStep,int aabhas,unsigned char *mask,int maskstep) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if((xIndex>=width) || (yIndex>=height)) return; const int color_tid1 = (yIndex)* aabhas + (3 * (xIndex)); const int color_tid2 = (yIndex)* colorWidthStep + (3 * (xIndex)); const int color_mask=(yIndex)*maskstep+(3*(xIndex)); float m= ( unsigned char)mask[color_mask]; if(mask[color_mask]!=255 && mask[color_mask]!=0) printf("c=%d %d %d %d\n",( unsigned char)mask[color_mask],xIndex,yIndex,height); float m1=m/255.0; float m2=1-m1; // printf("%d %d %d\n",mask[color_mask],mask[color_mask+1],mask[color_mask+2]); int x=d_input2[color_tid2]; d_input2[color_tid2]=static_cast<unsigned char> ((m1)* d_input2[color_tid2] +(m2)*d_input1[color_tid1]); // if(m1<=0.58 && m1>=0.22) // printf("%f %f %d %d %d\n %d %d %d\n",m1,m2,xIndex,yIndex, mask[color_mask],d_input2[color_tid2],x,d_input1[color_tid1]); m= ( unsigned char)mask[color_mask+1]; m1=m/255.0; m2=1-m1; // printf("2%f %f %d %d \n",m1,m2,xIndex,yIndex); d_input2[color_tid2+1]=static_cast<unsigned char>((m1)* d_input2[color_tid2+1] +(m2)*d_input1[color_tid1+1]); m= ( unsigned char)mask[color_mask+2]; m1=m/255.0; m2=1-m1; // printf("3%f %f %d %d \n",m1,m2,xIndex,yIndex); d_input2[color_tid2+2]=static_cast<unsigned char> ((m1)* d_input2[color_tid2+2] +(m2)*d_input1[color_tid1+2]); // d_input2[color_tid2+1]=(beta/100.0)* d_input2[color_tid2+1] +(alpha/100.0)*d_input1[color_tid1+1]; // d_input2[color_tid2+2]=(beta/100.0)* d_input2[color_tid2+2] +(alpha/100.0)*d_input1[color_tid1+2] ; // printf("%f\n",m); // d_input2[color_tid]= d_input // float alpha=100,beta=0; //if((startx1+xIndex<width) && (starty1+yIndex<height)) { // if((startx+xIndex<=endx) && (starty+yIndex<=endy)) { //const int color_tid1 = (yIndex +starty)* aabhas + (3 * (xIndex+startx)); // const int color_tid2 = (yIndex +starty1)* colorWidthStep + (3 * (xIndex+startx1)); //int a=d_input2[color_tid2]; // d_input2[color_tid2]=(beta/100.0)* d_input2[color_tid2] +(alpha/100.0)*d_input1[color_tid1]; // d_input2[color_tid2+1]=(beta/100.0)* d_input2[color_tid2+1] +(alpha/100.0)*d_input1[color_tid1+1]; // d_input2[color_tid2+2]=(beta/100.0)* d_input2[color_tid2+2] +(alpha/100.0)*d_input1[color_tid1+2] ; } } } void blendimage(Mat& input1 , Mat& input2 ,Mat& output1,int scale,Mat & mask) { const int insize1=input1.step * input1.rows; const int insize2=input2.step * input2.rows; const int masksize=mask.step * mask.rows; unsigned char *d_input1,*d_input2,*d_mask;// *d_output; int x,y; Mat img=mask; for(x=0;x<img.cols;x++) for(y=0;y<img.rows;y++) if(img.at<cv::Vec3b>(y,x)[0]!=0 &&img.at<cv::Vec3b>(y,x)[0]!=255) { cout<<"\nmask fail\n"<<endl; } // cout<<insize1<<" 
"<<insize2<<" --- "<<mask.step*mask.rows<<endl; // cout<<mask.cols<<" m "<<mask.rows<<" "<<mask.step<<endl; // cout<<input1.cols<<" 1 "<<input1.rows<<" "<<input1.step<<endl; // cout<<input2.cols<<" 2 "<<input2.rows<<" "<<input2.step<<endl; cudaMalloc<unsigned char>(&d_input1,insize1); cudaMalloc<unsigned char>(&d_input2,insize2); cudaMalloc<unsigned char>(&d_mask,masksize); cudaMemcpy(d_input1,input1.ptr(),insize1,cudaMemcpyHostToDevice); cudaMemcpy(d_input2,input2.ptr(),insize2,cudaMemcpyHostToDevice); cudaMemcpy(d_mask,mask.ptr(),masksize,cudaMemcpyHostToDevice); const dim3 block(16,16); Mat output(input2.rows,input2.cols,CV_8UC3); /* startx=startx/scale; starty=starty/scale; startx1=startx1/scale; starty1=starty1/scale; endx=endx/scale; endy=endy/scale;*/ const dim3 grid((input2.cols + block.x )/block.x, (input2.rows + block.y )/block.y); imageblend_kernel<<<grid,block>>>(d_input1,d_input2,input2.cols,input2.rows,input2.step,input1.step,d_mask,mask.step); cudaDeviceSynchronize(); cudaMemcpy(output.ptr(),d_input2,insize2,cudaMemcpyDeviceToHost); startx=startx*scale; starty=starty*scale; startx1=startx1*scale; starty1=starty1*scale; endx=endx*scale; endy=endy*scale; output1=output; } void display(Mat &img) { namedWindow("debug",1); imshow("debug",img); waitKey(0); } int main(int argc, char** argv) { int debug=0; // Read image from file // cout<<"Enter the vale of alpha and beta\n"; // cin>>alpha>>beta; Mat img1 = imread("dataset/pepper.jpg"); Mat mask_1= imread("dataset/mask.jpg"); //if fail to read the image if ( img1.empty() ) { cout << "Error loading the image 1" << endl; return -1; } //Create a window namedWindow("Mask", 1); imshow("Mask",mask_1); waitKey(0); namedWindow("Image 1", 1); //set the callback function for any mouse event setMouseCallback("Image 1", CallBackFunc_1, NULL); //show the image imshow("Image 1", img1); // Wait until user press some key waitKey(0); cout<<"position of first\n"<<startx<<" "<<starty<<" "<<endx<<" "<<endy<<endl; Mat img2=imread("dataset/snow.jpg"); if ( img2.empty() ) { cout << "Error loading the image 2" << endl; return -1; } namedWindow("Image 2",CV_WINDOW_AUTOSIZE); setMouseCallback("Image 2", CallBackFunc_2, NULL); //show the image imshow("Image 2", img2); waitKey(0); cout<<"position of second\n"<<startx1<<" "<<starty1<<endl; struct timespec t1, t2; clock_gettime(CLOCK_MONOTONIC, &t1); int newrow=img1.rows/2; int newcol=img1.cols/2; //cv::Mat output(img1);//(newrow,newcol,CV_8UC3);; cv::Mat output(newrow,newcol,CV_8UC3);; Mat output_1,output_2,output_3; // imshow("output",img1); // waitKey(0); cv::Mat finaloutput; Mat mask_2,mask_3,mask_4; // pyrdown(mask_1,mask_2,mask_3,mask_4); pyrdownmask(mask_1,mask_2,mask_3,mask_4); // display(mask_3); // cout<<"mat=\n"<<mask_1<<endl; pyrdown(img1,output_1,output_2,output_3); // cv::pyrDown(img1,output); // blendimage(img1,img2,output); if(debug==1) { namedWindow("output_0",1); imshow("output_0",img1); waitKey(0); namedWindow("output_1",1); imshow("output_1",output_1); waitKey(0); namedWindow("output_2",1); imshow("output_2",output_2); waitKey(0); namedWindow("output_3",1); imshow("output_3",output_3); waitKey(0); } // cv::Mat output1(newrow*2,newcol*2,CV_8UC3); Mat pyoutput_1,pyoutput_2,pyoutput_3; pyrup(output_1,pyoutput_1); pyrup(output_2,pyoutput_2); pyrup(output_3,pyoutput_3); if(debug==1) { namedWindow("showall",1); imshow("showall",pyoutput_1); waitKey(0); imshow("showall",pyoutput_2); waitKey(0); imshow("showall",pyoutput_3); waitKey(0); } //cout<<"Mat="<<pyoutput_3<<endl; // namedWindow("output",1); 
// imshow("output",pyoutput_1-img1); // waitKey(0); if(1==1) { Mat LA2,LA1,LA0; Mat LA3=output_3; submat(img1,pyoutput_1,LA0); submat(output_1,pyoutput_2,LA1); submat(output_2,pyoutput_3,LA2); // cout<<LA0<<endl; // display(LA0); if(debug==1) { namedWindow("submat1",1); imshow("submat1",LA0); waitKey(0); namedWindow("submat2",1); imshow("submat2",LA1); waitKey(0); namedWindow("submat3",1); imshow("submat3",LA2); waitKey(0); namedWindow("submat4",1); imshow("submat4",LA3); waitKey(0); } Mat output1_1,output1_2,output1_3; pyrdown(img2,output1_1,output1_2,output1_3); // namedWindow("output_0",1); if(debug==1) { imshow("output_0",img2); waitKey(0); // namedWindow("output_1",1); imshow("output_1",output1_1); waitKey(0); // namedWindow("output_2",1); imshow("output_2",output1_2); waitKey(0); // namedWindow("output_3",1); imshow("output_3",output1_3); waitKey(0); } Mat pyoutput1_1,pyoutput1_2,pyoutput1_3; pyrup(output1_1,pyoutput1_1); pyrup(output1_2,pyoutput1_2); pyrup(output1_3,pyoutput1_3); Mat LB2,LB1,LB0; Mat LB3=output1_3; submat(img2,pyoutput1_1,LB0); submat(output1_1,pyoutput1_2,LB1); submat(output1_2,pyoutput1_3,LB2); Mat LS3,LS2,LS1,LS0; // namedWindow("submat1",1); if(debug==1) { imshow("submat1",LB0); waitKey(0); // namedWindow("submat2",1); imshow("submat2",LB1); waitKey(0); // namedWindow("submat3",1); imshow("submat3",LB2); waitKey(0); // namedWindow("submat4",1); imshow("submat4",LB3); waitKey(0); // cout<<LA0.rows<<" "<<LA0.cols<<endl; // cout<<LB0.rows<<" "<<LB0.cols<<endl; } int gauss=0; if(gauss==1) GaussianBlur(mask_1,mask_1,Size( 7, 7), 0, 0); blendimage(LA0,LB0,LS0,1,mask_1); // imwrite("debug/mask_1.jpg",mask_1); // display(LS0); if(debug==1) { namedWindow("LS0",1); imshow("LS0",LS0); waitKey(0); } if(gauss==1) GaussianBlur(mask_2,mask_2,Size( 7, 7), 0, 0); blendimage(LA1,LB1,LS1,2,mask_2); // cout<<mask_2<<endl; // imwrite("debug/mask_2.jpg",mask_2); if(debug==1) { namedWindow("LS1",1); imshow("LS1",LS1); waitKey(0); } if(gauss==1) GaussianBlur(mask_3,mask_3,Size( 7, 7), 0, 0); blendimage(LA2,LB2,LS2,4,mask_3); // imwrite("debug/mask_3.jpg",mask_3); if(debug==1) { namedWindow("LS2",1); imshow("LS2",LS2); waitKey(0); } if(gauss==1) GaussianBlur(mask_4,mask_4,Size( 7, 7), 0, 0); GaussianBlur(mask_4,mask_4,Size( 7, 7), 0, 0); blendimage(LA3,LB3,LS3,8,mask_4); // cout<<mask_4; // imwrite("debug/mask_4.jpg",mask_4); if(debug==1) { namedWindow("LS3",1); imshow("LS3",LS3); waitKey(0); } Mat final0,final1,final2,final3; add2mat(LS3,LS2,final3); if(debug==1) { namedWindow("final3",1); imshow("final3",final3); waitKey(0); } add2mat(final3,LS1,final2); if(debug==1) { namedWindow("final2",1); imshow("final2",final2); waitKey(0); } clock_gettime(CLOCK_MONOTONIC, &t2); float time = ((t2.tv_sec - t1.tv_sec)*1000) + (((double)(t2.tv_nsec - t1.tv_nsec))/1000000.0); printf("Time (in milliseconds): %f\n", time); add2mat(final2,LS0,final1); namedWindow("final1",1); imshow("final1",final1); waitKey(0); imwrite("final1.jpg",final1); // Mat yo; // medianBlur(final1, yo, 3 ); // GaussianBlur(final1,yo,Size( 7, 7), 0, 0); // display(yo); } /* add2mat(LS0,final1,final0); namedWindow("final0",1); imshow("final0",final0); waitKey(0);*/ /* pyrup(output_2,pyoutput_2); imshow("output",pyoutput_2-output_1); waitKey(0); pyrup(output_3,pyoutput_3); imshow("output",pyoutput_3-output_2); waitKey(0);*/ // pyrdown(img1,output); // cout<<img1.rows<<" "<<img1.cols<<" "<<output.rows<<" "<<output.cols<<endl; return 0; }
bec60c03bca4c36da4dd379e30a5e3dc18013ff9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts, float alpha_, float gamma_) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { //loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], // Dtype(FLT_MIN))); Dtype pt = prob_data[n * dim + label_value * spatial_dim + s]; loss[index] = -alpha_ * powf(1 - pt, gamma_) * log(max(pt, Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, alpha_, gamma_); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype normalizer = LossLayer<Dtype>::GetNormalizer( normalization_, outer_num_, inner_num_, valid_count); top[0]->mutable_cpu_data()[0] = loss / normalizer; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts, float alpha_, float gamma_) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { Dtype pt = bottom_diff[n * dim + label_value * spatial_dim + s]; for (int c = 0; c < channels; ++c) { if(c == label_value){ bottom_diff[n * dim + c * spatial_dim + s] = alpha_ * powf(1 - pt, gamma_) * (gamma_ * pt * log(max(pt, Dtype(FLT_MIN))) + pt - 1); } else{ Dtype pc = bottom_diff[n * dim + c * spatial_dim + s]; bottom_diff[n * dim + c * spatial_dim + s] = alpha_ * (powf(1 - pt, gamma_ - 1) * (-gamma_ * log(max(pt, Dtype(FLT_MIN))) * pt * pc) + powf(1 - pt, gamma_) * pc); } } counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, alpha_, gamma_); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype normalizer = LossLayer<Dtype>::GetNormalizer( normalization_, outer_num_, inner_num_, valid_count); const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer; caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
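The forward kernel in this file computes a focal-loss variant of softmax cross-entropy, loss_i = -alpha * (1 - p_t)^gamma * log(p_t), where p_t is the predicted probability of the true class. Below is a standalone sketch of that per-sample computation, stripped of Caffe's Blob and spatial-dimension machinery; the kernel and parameter names are illustrative, not Caffe's API.

#include <cfloat>

// prob is row-major [num x classes]; label holds the true class index per row.
__global__ void focal_loss_forward(int num, int classes,
                                   const float* prob, const int* label,
                                   float alpha, float gamma, float* loss)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num) return;
    float pt = prob[i * classes + label[i]];   // probability of the true class
    pt = fmaxf(pt, FLT_MIN);                   // same clamp as the Caffe kernel
    loss[i] = -alpha * powf(1.0f - pt, gamma) * logf(pt);
}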
bec60c03bca4c36da4dd379e30a5e3dc18013ff9.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts, float alpha_, float gamma_) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { //loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], // Dtype(FLT_MIN))); Dtype pt = prob_data[n * dim + label_value * spatial_dim + s]; loss[index] = -alpha_ * powf(1 - pt, gamma_) * log(max(pt, Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, alpha_, gamma_); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype normalizer = LossLayer<Dtype>::GetNormalizer( normalization_, outer_num_, inner_num_, valid_count); top[0]->mutable_cpu_data()[0] = loss / normalizer; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts, float alpha_, float gamma_) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { Dtype pt = bottom_diff[n * dim + label_value * spatial_dim + s]; for (int c = 0; c < channels; ++c) { if(c == label_value){ bottom_diff[n * dim + c * spatial_dim + s] = alpha_ * powf(1 - pt, gamma_) * (gamma_ * pt * log(max(pt, Dtype(FLT_MIN))) + pt - 1); } else{ Dtype pc = bottom_diff[n * dim + c * spatial_dim + s]; bottom_diff[n * dim + c * spatial_dim + s] = alpha_ * (powf(1 - pt, gamma_ - 1) * (-gamma_ * log(max(pt, Dtype(FLT_MIN))) * pt * pc) + powf(1 - pt, gamma_) * pc); } } counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, alpha_, gamma_); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype normalizer = LossLayer<Dtype>::GetNormalizer( normalization_, outer_num_, inner_num_, valid_count); const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer; caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
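For reference, the two branches of SoftmaxLossBackwardGPU above encode the analytic gradient of the focal loss with respect to the logits. Writing p_t for the true-class probability and p_c for any other class, the chain rule through the softmax gives

\frac{\partial L}{\partial z_t} = \alpha (1-p_t)^{\gamma}\bigl(\gamma\, p_t \log p_t + p_t - 1\bigr),
\qquad
\frac{\partial L}{\partial z_c} = \alpha\Bigl((1-p_t)^{\gamma-1}\bigl(-\gamma\, p_t\, p_c \log p_t\bigr) + (1-p_t)^{\gamma} p_c\Bigr), \quad c \neq t,

which reduces to the standard softmax cross-entropy gradient p_c - \mathbf{1}\{c=t\} when \gamma = 0 and \alpha = 1.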
a8b900c1793ec1d2b572df046f3b44f489f6bfa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //if (propagate_down[1]) { //LOG(FATAL) << this->type() // << " Layer cannot backpropagate to label inputs."; //} if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
a8b900c1793ec1d2b572df046f3b44f489f6bfa2.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { //if (propagate_down[1]) { //LOG(FATAL) << this->type() // << " Layer cannot backpropagate to label inputs."; //} if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
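The backward pass in this (non-focal) version copies prob_ into bottom_diff and subtracts 1 at the label index, i.e. the standard softmax cross-entropy gradient dL/dz_c = p_c - 1{c = label}. A minimal standalone sketch of that update follows; the names are illustrative and not part of Caffe.

// grad must already contain the softmax probabilities, matching the Caffe code,
// which memcpys prob_ into bottom_diff before launching the kernel.
__global__ void softmax_xent_backward(int num, int classes,
                                      const int* label, float* grad)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num) return;
    grad[i * classes + label[i]] -= 1.0f;   // p_c - 1 at the true class, p_c elsewhere
}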
b545b1db8698b529708bd946a7cb78436235d2d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zmergecg.cu normal z -> d, Sat Nov 15 19:54:21 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from dmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_dcgreduce_kernel_spmv1( int Gs, int n, magmaDouble_ptr vtmp, magmaDouble_ptr vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_dcgmerge_spmvcsr_kernel( int n, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if( i<n ){ double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] 
+= temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_dcgmerge_spmvell_kernel( int n, int num_cols_per_row, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = dcolind [ n * k + i ]; double val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_dcgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = dcolind [ num_cols_per_row * i + k ]; double val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ 
Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_8( int n, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_16( int n, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_32( int n, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // 
original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_dcgmerge_spmvellpackrt_kernel2( int n, magmaDouble_ptr z, magmaDouble_ptr d, magmaDouble_ptr vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_D_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_dcgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++){ int col = dcolind [offset+ blocksize * n + Idx ]; double val = dval[offset+ blocksize * n + Idx]; if( val != 0){ dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx 
] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_dcg_rhokernel( magmaDouble_ptr skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_d_sparse_matrix input matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] dd magmaDouble_ptr input vector d @param[out] dz magmaDouble_ptr input vector z @param[out] skp magmaDouble_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dcgmerge_spmv1( magma_d_sparse_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dd, magmaDouble_ptr dz, magmaDouble_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) hipLaunchKernelGGL(( magma_dcgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) hipLaunchKernelGGL(( magma_dcgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) hipLaunchKernelGGL(( magma_dcgmerge_spmvell_kernel), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_SELLP ) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = sqrt(A.numblocks); int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1; dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( double ); if ( A.alignment == 8) hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_8) , dim3(gridsellp), dim3(block), Mssellp, queue , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_16) , dim3(gridsellp), dim3(block), Mssellp, queue , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) hipLaunchKernelGGL(( magma_dcgmerge_spmvsellpt_kernel_32) , dim3(gridsellp), dim3(block), Mssellp, queue , A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( double ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_32) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_16) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel_8) , dim3(gridellrt), dim3(num_threads) , Mellrt, queue , A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", A.alignment); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
hipLaunchKernelGGL(( magma_dcgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, queue , A.num_rows, dz, dd, d1 ); } while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_dcg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_dcgmerge_xrbeta_kernel( int n, magmaDouble_ptr x, magmaDouble_ptr r, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr skp, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; double rho = skp[3]; double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if( i<n ){ x[i] += rho * d[i] ; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_dcg_alphabetakernel( magmaDouble_ptr skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_D_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_dcg_d_kernel( int n, magmaDouble_ptr skp, magmaDouble_ptr r, magmaDouble_ptr d ) { int i = blockIdx.x * blockDim.x + threadIdx.x; double alpha = skp[0]; if( i<n ){ d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in/out] dx magmaDouble_ptr input vector x @param[in/out] dr magmaDouble_ptr input/output vector r @param[in] dd magmaDouble_ptr input vector d @param[in] dz magmaDouble_ptr input vector z @param[in] skp magmaDouble_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dsygpuk ********************************************************************/ extern "C" magma_int_t magma_dcgmerge_xrbeta( int n, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dx, magmaDouble_ptr dr, magmaDouble_ptr dd, magmaDouble_ptr dz, magmaDouble_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_dcgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0, n, dx, dr, dd, dz, skp, d1); while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_dcgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0, Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_dcg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp ); dim3 Bs3( local_block_size ); dim3 Gs3( (n+local_block_size-1)/local_block_size ); hipLaunchKernelGGL(( magma_dcg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, dr, dd ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
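// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original MAGMA source): a plain CPU
// reference of one iteration of the merged CG scheme implemented above, using
// the scalar layout as read from the kernels:
//   skp[0] "alpha" = beta_new / gamma          (magma_dcg_alphabetakernel)
//   skp[1] "beta"  = r . r                     (copied in via skp+1)
//   skp[2] "gamma" = previous r . r            (magma_dcg_rhokernel)
//   skp[3] "rho"   = beta / (d . A d)          (magma_dcg_rhokernel)
//   skp[4] "tmp"   = d . A d                   (copied in via skp+4)
// The dense row-major matvec and the name cg_reference_step are assumptions
// made only for this example; the GPU kernels operate on sparse A. On entry,
// skp[1] must already hold r.r for the current residual.
// ---------------------------------------------------------------------------
static void cg_reference_step( int n, const double *A,   /* n*n, row-major */
                               double *x, double *r, double *d, double *z,
                               double skp[5] )
{
    // z = A*d and tmp = d.z (merged in magma_dcgmerge_spmv1 + reduction)
    double tmp = 0.0;
    for ( int i = 0; i < n; ++i ) {
        double dot = 0.0;
        for ( int j = 0; j < n; ++j )
            dot += A[i*n + j] * d[j];
        z[i] = dot;
        tmp += d[i] * dot;
    }
    skp[4] = tmp;
    skp[3] = skp[1] / skp[4];     // rho   (magma_dcg_rhokernel)
    skp[2] = skp[1];              // gamma (magma_dcg_rhokernel)

    // x += rho*d, r -= rho*z, beta = r.r (merged in magma_dcgmerge_xrbeta_kernel)
    double beta = 0.0;
    for ( int i = 0; i < n; ++i ) {
        x[i] += skp[3] * d[i];
        r[i] -= skp[3] * z[i];
        beta += r[i] * r[i];
    }
    skp[1] = beta;
    skp[0] = skp[1] / skp[2];     // alpha (magma_dcg_alphabetakernel)

    // d = r + alpha*d (magma_dcg_d_kernel)
    for ( int i = 0; i < n; ++i )
        d[i] = r[i] + skp[0] * d[i];
}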
b545b1db8698b529708bd946a7cb78436235d2d3.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zmergecg.cu normal z -> d, Sat Nov 15 19:54:21 2014 @author Hartwig Anzt */ #include "common_magma.h" #include "magmasparse.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from dmergecg into one // for a description see // "Reformulated Conjugate Gradient for the Energy-Aware // Solution of Linear Systems on GPUs (ICPP '13) // accelerated reduction for one vector __global__ void magma_dcgreduce_kernel_spmv1( int Gs, int n, magmaDouble_ptr vtmp, magmaDouble_ptr vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_D_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using CSR and the first step of the reduction __global__ void magma_dcgmerge_spmvcsr_kernel( int n, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if( i<n ){ double dot = MAGMA_D_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * d[ dcolind[j] ]; z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float 
*temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELL and the first step of the reduction __global__ void magma_dcgmerge_spmvell_kernel( int n, int num_cols_per_row, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = dcolind [ n * k + i ]; double val = dval [ n * k + i ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLPACK and the first step of the reduction __global__ void magma_dcgmerge_spmvellpack_kernel( int n, int num_cols_per_row, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int k = 0; k < num_cols_per_row ; k ++){ int col = dcolind [ num_cols_per_row * i + k ]; double val = dval [ num_cols_per_row * i + k ]; if( val != 0) dot += val * d[ col ]; } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if 
defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_8( int n, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_16( int n, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //double val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // computes the SpMV using ELLRT 8 threads per row __global__ void magma_dcgmerge_spmvellpackrt_kernel_32( int n, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp, magma_int_t T, magma_int_t alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ double shared[]; if(i < n ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //double val = dval[ 
k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) double val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * d[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { z[i] = (shared[idb]+shared[idb+1]); } } } } // additional kernel necessary to compute first reduction step __global__ void magma_dcgmerge_spmvellpackrt_kernel2( int n, magmaDouble_ptr z, magmaDouble_ptr d, magmaDouble_ptr vtmp2 ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_D_MAKE(0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } // computes the SpMV using SELLC __global__ void magma_dcgmerge_spmvsellc_kernel( int num_rows, int blocksize, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr vtmp) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if(i < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n ++){ int col = dcolind [offset+ blocksize * n + Idx ]; double val = dval[offset+ blocksize * n + Idx]; if( val != 0){ dot=dot+val*d[col]; } } z[ i ] = dot; temp[ Idx ] = d[ i ] * dot; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx 
< 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_8( int num_rows, int blocksize, int T, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_16( int num_rows, int blocksize, int T, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void magma_dcgmerge_spmvsellpt_kernel_32( int num_rows, int blocksize, int T, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr d, magmaDouble_ptr z) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * d[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { z[row] = (shared[ldx]+shared[ldx+blocksize*1]); } } } } // kernel to handle scalars __global__ void // rho = beta/tmp; gamma = beta; magma_dcg_rhokernel( magmaDouble_ptr skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp = skp[1]; skp[3] = tmp/skp[4]; skp[2] = tmp; } } /** Purpose ------- Merges the first SpmV using different formats with the dot product and the computation of rho Arguments --------- @param[in] A magma_d_sparse_matrix input matrix @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in] dd magmaDouble_ptr input vector d @param[out] dz magmaDouble_ptr input vector z @param[out] skp magmaDouble_ptr array for parameters ( skp[3]=rho ) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dcgmerge_spmv1( magma_d_sparse_matrix A, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dd, magmaDouble_ptr dz, magmaDouble_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR ) magma_dcgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, queue >>> ( A.num_rows, A.dval, A.drow, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELLPACKT ) magma_dcgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, queue >>> ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_ELL ) magma_dcgmerge_spmvell_kernel<<<Gs, Bs, Ms, queue >>> ( A.num_rows, A.max_nnz_row, A.dval, A.dcol, dd, dz, d1 ); else if ( A.storage_type == Magma_SELLP ) { int num_threadssellp = A.blocksize*A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threadssellp > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( A.blocksize, A.alignment, 1); int dimgrid1 = sqrt(A.numblocks); int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1; dim3 gridsellp( dimgrid1, dimgrid2, 1); int Mssellp = num_threadssellp * sizeof( double ); if ( A.alignment == 8) magma_dcgmerge_spmvsellpt_kernel_8 <<< gridsellp, block, Mssellp, queue >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 16) magma_dcgmerge_spmvsellpt_kernel_16 <<< gridsellp, block, Mssellp, queue >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else if ( A.alignment == 32) magma_dcgmerge_spmvsellpt_kernel_32 <<< gridsellp, block, Mssellp, queue >>> ( A.num_rows, A.blocksize, A.alignment, A.dval, A.dcol, A.drow, dd, dz); else printf("error: alignment not supported.\n"); // in case of using SELLP, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. 
magma_dcgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>> ( A.num_rows, dz, dd, d1 ); } else if ( A.storage_type == Magma_ELLRT ) { // in case of using ELLRT, we need a different grid, assigning // threads_per_row processors to each row // the block size is num_threads // fixed values int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize); int num_threads = A.alignment*A.blocksize; int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment) *A.alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 gridellrt( dimgrid1, dimgrid2, 1); int Mellrt = A.alignment * A.blocksize * sizeof( double ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( A.alignment == 32 ) { magma_dcgmerge_spmvellpackrt_kernel_32 <<< gridellrt, num_threads , Mellrt, queue >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 16 ) { magma_dcgmerge_spmvellpackrt_kernel_16 <<< gridellrt, num_threads , Mellrt, queue >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else if ( A.alignment == 8 ) { magma_dcgmerge_spmvellpackrt_kernel_8 <<< gridellrt, num_threads , Mellrt, queue >>> ( A.num_rows, A.dval, A.dcol, A.drow, dd, dz, d1, A.alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", A.alignment); return MAGMA_ERR_NOT_SUPPORTED; } // in case of using ELLRT, we can't efficiently merge the // dot product and the first reduction loop into the SpMV kernel // as the SpMV grid would result in low occupancy. magma_dcgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, queue >>> ( A.num_rows, dz, dd, d1 ); } while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, A.num_rows, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+4, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_dcg_rhokernel<<<Gs2, Bs2, 0>>>( skp ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // updates x and r and computes the first part of the dot product r*r __global__ void magma_dcgmerge_xrbeta_kernel( int n, magmaDouble_ptr x, magmaDouble_ptr r, magmaDouble_ptr d, magmaDouble_ptr z, magmaDouble_ptr skp, magmaDouble_ptr vtmp ) { extern __shared__ double temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; double rho = skp[3]; double mrho = MAGMA_D_MAKE( -1.0, 0.0)*rho; temp[ Idx ] = MAGMA_D_MAKE( 0.0, 0.0); if( i<n ){ x[i] += rho * d[i] ; r[i] += mrho * z[i]; temp[ Idx ] = r[i] * r[i]; } __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 16 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 8 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 4 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 2 ];__syncthreads(); temp[ Idx ] += temp[ Idx + 1 ];__syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += 
temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // kernel to handle scalars __global__ void //alpha = beta / gamma magma_dcg_alphabetakernel( magmaDouble_ptr skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp1 = skp[1]; skp[0] = tmp1/skp[2]; //printf("beta=%e\n", MAGMA_D_REAL(tmp1)); } } // update search Krylov vector d __global__ void magma_dcg_d_kernel( int n, magmaDouble_ptr skp, magmaDouble_ptr r, magmaDouble_ptr d ) { int i = blockIdx.x * blockDim.x + threadIdx.x; double alpha = skp[0]; if( i<n ){ d[i] = r[i] + alpha * d[i]; } } /** Purpose ------- Merges the update of r and x with the dot product and performs then the update for the Krylov vector d Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDouble_ptr temporary vector @param[in] d2 magmaDouble_ptr temporary vector @param[in/out] dx magmaDouble_ptr input vector x @param[in/out] dr magmaDouble_ptr input/output vector r @param[in] dd magmaDouble_ptr input vector d @param[in] dz magmaDouble_ptr input vector z @param[in] skp magmaDouble_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dsygpuk ********************************************************************/ extern "C" magma_int_t magma_dcgmerge_xrbeta( int n, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr dx, magmaDouble_ptr dr, magmaDouble_ptr dd, magmaDouble_ptr dz, magmaDouble_ptr skp, magma_queue_t queue ) { // set queue for old dense routines magma_queue_t orig_queue; magmablasGetKernelStream( &orig_queue ); int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( (n+local_block_size-1)/local_block_size ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( double ); magmaDouble_ptr aux1 = d1, aux2 = d2; int b = 1; magma_dcgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>> ( n, dx, dr, dd, dz, skp, d1); while( Gs.x > 1 ) { Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ; if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_dcgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_dcopyvector( 1, aux1, 1, skp+1, 1 ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_dcg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp ); dim3 Bs3( local_block_size ); dim3 Gs3( (n+local_block_size-1)/local_block_size ); magma_dcg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, dr, dd ); magmablasSetKernelStream( orig_queue ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
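// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original MAGMA source): the host-side
// ping-pong reduction pattern used by the while( Gs.x > 1 ) loops above, in
// simplified form. The real code additionally halves the block size and lets
// each thread fold two elements per pass; here each pass simply produces one
// partial sum per block. The names partial_sum_kernel and reduce_to_scalar
// are hypothetical, and result must be a device pointer.
// ---------------------------------------------------------------------------
__global__ void partial_sum_kernel( int n, const double *in, double *out )
{
    extern __shared__ double smem[];
    int tid = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + tid;
    smem[tid] = ( i < n ) ? in[i] : 0.0;
    __syncthreads();
    // block-level tree reduction, same shape as magma_dcgreduce_kernel_spmv1
    for ( int s = blockDim.x / 2; s > 0; s >>= 1 ) {
        if ( tid < s )
            smem[tid] += smem[tid + s];
        __syncthreads();
    }
    if ( tid == 0 )
        out[blockIdx.x] = smem[0];
}

static void reduce_to_scalar( int n, double *d1, double *d2, double *result )
{
    const int threads = 256;          // power of two, as the tree reduction assumes
    double *in = d1, *out = d2;
    while ( n > 1 ) {
        int blocks = ( n + threads - 1 ) / threads;
        partial_sum_kernel<<< blocks, threads, threads * sizeof(double) >>>( n, in, out );
        n = blocks;
        double *tmp = in; in = out; out = tmp;   // swap buffer roles (aux1/aux2 ping-pong)
    }
    // the fully reduced value now sits in in[0]
    cudaMemcpy( result, in, sizeof(double), cudaMemcpyDeviceToDevice );
}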
077eaeaebc961864b67dc87b1d0529f8328cc63b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include<stdlib.h> #include<stdio.h> #include<thrust/sort.h> #include<math.h> #include<cuda.h> using namespace std; __global__ void k1(long *gdata,long *gquery,long *gres,int *gid,int N,int count) { int id = blockIdx.x*blockDim.x+threadIdx.x;; //gres[id*2+0] = id; gid[id] = id; float dist = 0; for(int i=1;i<count-1;i++){ //printf("%d\t%0.2f\t%0.2f\n",id,gdata[id*count+i],gquery[i]); dist += (gdata[id*count+i]-gquery[i])*(gdata[id*count+i]-gquery[i]); } gres[id] = sqrt(dist); //printf("%d %0.2f\n",id,gres[id]); } __global__ void maxk(long *data,long *query,long *res,int *gid,int N,int count){ int id = blockIdx.x*blockDim.x+threadIdx.x; int i = id%N; int j = id/N; float dis = 0; for(int k=1;k<count-1;k++){ dis +=((data[i*count+k]-query[j*count+k])*(data[i*count+k]-query[j*count+k])); } //printf("%d\n",id); res[id] = sqrt(dis); gid[id] = id; } __global__ void Accuracy(long *query,long *result,int count,int *counter){ int id = threadIdx.x; //printf("%d %d\n",s1[id],s2[id]); int x = 1; if(query[id*count+10]==result[id]){ atomicAdd(&counter[0],x); } } int main(){ int k = 3 ; FILE *fp; int N = 10000; int count = 11; fp = fopen("binput.txt","r"); char ch = ' '; long *data = (long *)malloc(N*count*sizeof(long)); for(int i=0;i<N;i++){ for(int j=0;j<count;j++){ fscanf(fp,"%ld",&data[i*count+j]); ch = fgetc(fp); //cout<<data[i*count+j]<<"\t"; } //cout<<"\n"; } int m = 100; count = 11; FILE *op; op = fopen("bitest.txt","r"); long *query = (long *)malloc(m*count*sizeof(long)); for(int i=0;i<m;i++){ for(int j=0;j<count;j++){ fscanf(op,"%ld",&query[i*count+j]); ch = fgetc(op); //cout<<query[i*count+j]<<"\t"; } //cout<<"\n"; } long *result = (long *)malloc(m*sizeof(long)); long *gquery,*gdata,*res,*gres; int *id,*gid; hipEvent_t start1, stop1; hipEventCreate(&start1); hipEventCreate(&stop1); float ms = 0; for(int i=0;i<m;i++){ long *point = (long *)malloc(count*sizeof(long)); for(int j=0;j<count;j++){ point[j] = query[i*count+j]; } float milliseconds1 = 0; hipEventRecord(start1,0); hipMalloc(&gquery,count*sizeof(long)); hipMalloc(&gdata,N*count*sizeof(long)); hipMalloc(&gres,N*sizeof(long)); hipMalloc(&gid,N*sizeof(int)); res = (long *)malloc(N*sizeof(long)); id = (int *)malloc(N*sizeof(int)); hipMemcpy(gdata,data,N*count*sizeof(long),hipMemcpyHostToDevice); hipMemcpy(gquery,point,count*sizeof(long),hipMemcpyHostToDevice); //Launching one test point to all train point kernal hipLaunchKernelGGL(( k1), dim3(16),dim3(N/16), 0, 0, gdata,gquery,gres,gid,N,count); hipMemcpy(res,gres,N*sizeof(long),hipMemcpyDeviceToHost); hipMemcpy(id,gid,N*sizeof(int),hipMemcpyDeviceToHost); hipEventRecord(stop1,0); hipEventSynchronize(stop1); hipEventElapsedTime(&milliseconds1, start1, stop1); ms+=milliseconds1; thrust::sort_by_key(res, res + N, id); //cout<<"\n============================\n"; int count1,count2; count1 = count2 = 0; for(int j=0;j<k;j++){ //cout<<i<<" "<<id[j]<<" "<<res[j]<<"\n"; //cout<<id[j]<<" "<<data[id[j]*count+10]<<"\n"; if(data[id[j]*count+10]==2){ count1++; } if(data[id[j]*count+10]==4){ count2++; } } //cout<<count1<<" "<<count2<<"\n"; if(count1>count2){ result[i] = 2; } else{ result[i] = 4; } } int *gcounter; int counter[1]; long *gresult,*ggquery; hipMalloc(&gresult,m*sizeof(long)); hipMalloc(&ggquery,m*count*sizeof(long)); counter[0] = 0; hipMalloc(&gcounter,1*sizeof(int)); 
hipMemcpy(gcounter,counter,1*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(ggquery,query,m*count*sizeof(long),hipMemcpyHostToDevice); hipMemcpy(gresult,result,m*sizeof(long),hipMemcpyHostToDevice); hipLaunchKernelGGL(( Accuracy), dim3(1),dim3(m), 0, 0, ggquery,gresult,count,gcounter); hipMemcpy(counter,gcounter,1*sizeof(int),hipMemcpyDeviceToHost); printf(" Total time taken %f\n",ms); //cout<<counter[0]; float acc = counter[0]*100; acc = acc/m; cout<<"Accuracy of KNN "<<acc<<"\n"; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; hipEventRecord(start,0); int *id2d,*gid2d; long *gdata2d,*gquery2d,*gres2d,*res2d; hipMalloc(&gid2d,m*N*sizeof(int)); id2d = (int *)malloc(m*N*sizeof(int)); res2d = (long *)malloc(m*N*sizeof(long)); hipMalloc(&gres2d,m*N*sizeof(long)); hipMalloc(&gdata2d,N*count*sizeof(long)); hipMalloc(&gquery2d,m*count*sizeof(long)); hipMemcpy(gdata2d,data,N*count*sizeof(long),hipMemcpyHostToDevice); hipMemcpy(gquery2d,query,m*count*sizeof(long),hipMemcpyHostToDevice); hipLaunchKernelGGL(( maxk), dim3(16*m),dim3(N/16), 0, 0, gdata2d,gquery2d,gres2d,gid2d,N,count); hipMemcpy(id2d,gid2d,m*N*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(res2d,gres2d,m*N*sizeof(long),hipMemcpyDeviceToHost); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("Total time taken %f\n",milliseconds); for(int i=0;i<m;i++){ //cout<<"Line"<<i<<"\t"; long *dist = (long *)malloc(N*sizeof(long)); int *im = (int *)malloc(N*sizeof(int)); for(int j=0;j<N;j++){ //cout<<res2d[i*N+j]<<"\t"; im[j] = id2d[i*N+j]%N; dist[j] = res2d[i*N+j]; } thrust::sort_by_key(dist, dist + N, im); int count1,count2; count1 = count2 = 0; for(int j=0;j<k;j++){ //cout<<im[j]<<"\t"; if(data[im[j]*count+10]==2){ count1++; } if(data[im[j]*count+10]==4){ count2++; } } if(count1>count2){ result[i] = 2; } else{ result[i] = 4; } //cout<<result[i]<<"\n"; //cout<<count1<<" "<<count2<<"\n"; } int *ggcounter; int ccounter[1]; long *ggresult,*gggquery; hipMalloc(&ggresult,m*sizeof(long)); hipMalloc(&gggquery,m*count*sizeof(long)); ccounter[0] = 0; hipMalloc(&ggcounter,1*sizeof(int)); hipMemcpy(ggcounter,ccounter,1*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(gggquery,query,m*count*sizeof(long),hipMemcpyHostToDevice); hipMemcpy(ggresult,result,m*sizeof(long),hipMemcpyHostToDevice); hipLaunchKernelGGL(( Accuracy), dim3(1),dim3(m), 0, 0, gggquery,ggresult,count,ggcounter); hipMemcpy(ccounter,ggcounter,1*sizeof(int),hipMemcpyDeviceToHost); float acc1 = ccounter[0]*100; acc1 = acc1/m; cout<<"Accuracy of KNN "<<acc1<<"\n"; hipDeviceSynchronize(); return 0; }
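// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original program): the launch
// k1<<<16, N/16>>> above provides exactly N threads with no bounds check, so
// it only works when N is a multiple of 16 and N/16 fits in one block. A
// guarded variant of the same distance kernel plus a ceil-division launch
// removes both restrictions; k1_guarded is a hypothetical name used only here.
// ---------------------------------------------------------------------------
__global__ void k1_guarded(long *gdata, long *gquery, long *gres, int *gid,
                           int N, int count)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= N) return;                  // guard: the grid may overshoot N
    gid[id] = id;
    float dist = 0.0f;
    for (int i = 1; i < count - 1; i++) { // same feature columns as k1 above
        float diff = (float)(gdata[id*count + i] - gquery[i]);
        dist += diff * diff;
    }
    gres[id] = (long)sqrtf(dist);         // truncated into the long result
                                          // buffer, as in the original kernel
}
// usage: launch with ceil-division instead of the fixed <<<16, N/16>>> shape,
//        e.g. threads = 256, blocks = (N + threads - 1) / threads.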
077eaeaebc961864b67dc87b1d0529f8328cc63b.cu
#include<iostream> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include<stdlib.h> #include<stdio.h> #include<thrust/sort.h> #include<math.h> #include<cuda.h> using namespace std; __global__ void k1(long *gdata,long *gquery,long *gres,int *gid,int N,int count) { int id = blockIdx.x*blockDim.x+threadIdx.x;; //gres[id*2+0] = id; gid[id] = id; float dist = 0; for(int i=1;i<count-1;i++){ //printf("%d\t%0.2f\t%0.2f\n",id,gdata[id*count+i],gquery[i]); dist += (gdata[id*count+i]-gquery[i])*(gdata[id*count+i]-gquery[i]); } gres[id] = sqrt(dist); //printf("%d %0.2f\n",id,gres[id]); } __global__ void maxk(long *data,long *query,long *res,int *gid,int N,int count){ int id = blockIdx.x*blockDim.x+threadIdx.x; int i = id%N; int j = id/N; float dis = 0; for(int k=1;k<count-1;k++){ dis +=((data[i*count+k]-query[j*count+k])*(data[i*count+k]-query[j*count+k])); } //printf("%d\n",id); res[id] = sqrt(dis); gid[id] = id; } __global__ void Accuracy(long *query,long *result,int count,int *counter){ int id = threadIdx.x; //printf("%d %d\n",s1[id],s2[id]); int x = 1; if(query[id*count+10]==result[id]){ atomicAdd(&counter[0],x); } } int main(){ int k = 3 ; FILE *fp; int N = 10000; int count = 11; fp = fopen("binput.txt","r"); char ch = ' '; long *data = (long *)malloc(N*count*sizeof(long)); for(int i=0;i<N;i++){ for(int j=0;j<count;j++){ fscanf(fp,"%ld",&data[i*count+j]); ch = fgetc(fp); //cout<<data[i*count+j]<<"\t"; } //cout<<"\n"; } int m = 100; count = 11; FILE *op; op = fopen("bitest.txt","r"); long *query = (long *)malloc(m*count*sizeof(long)); for(int i=0;i<m;i++){ for(int j=0;j<count;j++){ fscanf(op,"%ld",&query[i*count+j]); ch = fgetc(op); //cout<<query[i*count+j]<<"\t"; } //cout<<"\n"; } long *result = (long *)malloc(m*sizeof(long)); long *gquery,*gdata,*res,*gres; int *id,*gid; cudaEvent_t start1, stop1; cudaEventCreate(&start1); cudaEventCreate(&stop1); float ms = 0; for(int i=0;i<m;i++){ long *point = (long *)malloc(count*sizeof(long)); for(int j=0;j<count;j++){ point[j] = query[i*count+j]; } float milliseconds1 = 0; cudaEventRecord(start1,0); cudaMalloc(&gquery,count*sizeof(long)); cudaMalloc(&gdata,N*count*sizeof(long)); cudaMalloc(&gres,N*sizeof(long)); cudaMalloc(&gid,N*sizeof(int)); res = (long *)malloc(N*sizeof(long)); id = (int *)malloc(N*sizeof(int)); cudaMemcpy(gdata,data,N*count*sizeof(long),cudaMemcpyHostToDevice); cudaMemcpy(gquery,point,count*sizeof(long),cudaMemcpyHostToDevice); //Launching one test point to all train point kernal k1<<<16,N/16>>>(gdata,gquery,gres,gid,N,count); cudaMemcpy(res,gres,N*sizeof(long),cudaMemcpyDeviceToHost); cudaMemcpy(id,gid,N*sizeof(int),cudaMemcpyDeviceToHost); cudaEventRecord(stop1,0); cudaEventSynchronize(stop1); cudaEventElapsedTime(&milliseconds1, start1, stop1); ms+=milliseconds1; thrust::sort_by_key(res, res + N, id); //cout<<"\n============================\n"; int count1,count2; count1 = count2 = 0; for(int j=0;j<k;j++){ //cout<<i<<" "<<id[j]<<" "<<res[j]<<"\n"; //cout<<id[j]<<" "<<data[id[j]*count+10]<<"\n"; if(data[id[j]*count+10]==2){ count1++; } if(data[id[j]*count+10]==4){ count2++; } } //cout<<count1<<" "<<count2<<"\n"; if(count1>count2){ result[i] = 2; } else{ result[i] = 4; } } int *gcounter; int counter[1]; long *gresult,*ggquery; cudaMalloc(&gresult,m*sizeof(long)); cudaMalloc(&ggquery,m*count*sizeof(long)); counter[0] = 0; cudaMalloc(&gcounter,1*sizeof(int)); cudaMemcpy(gcounter,counter,1*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(ggquery,query,m*count*sizeof(long),cudaMemcpyHostToDevice); 
cudaMemcpy(gresult,result,m*sizeof(long),cudaMemcpyHostToDevice); Accuracy<<<1,m>>>(ggquery,gresult,count,gcounter); cudaMemcpy(counter,gcounter,1*sizeof(int),cudaMemcpyDeviceToHost); printf(" Total time taken %f\n",ms); //cout<<counter[0]; float acc = counter[0]*100; acc = acc/m; cout<<"Accuracy of KNN "<<acc<<"\n"; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; cudaEventRecord(start,0); int *id2d,*gid2d; long *gdata2d,*gquery2d,*gres2d,*res2d; cudaMalloc(&gid2d,m*N*sizeof(int)); id2d = (int *)malloc(m*N*sizeof(int)); res2d = (long *)malloc(m*N*sizeof(long)); cudaMalloc(&gres2d,m*N*sizeof(long)); cudaMalloc(&gdata2d,N*count*sizeof(long)); cudaMalloc(&gquery2d,m*count*sizeof(long)); cudaMemcpy(gdata2d,data,N*count*sizeof(long),cudaMemcpyHostToDevice); cudaMemcpy(gquery2d,query,m*count*sizeof(long),cudaMemcpyHostToDevice); maxk<<<16*m,N/16>>>(gdata2d,gquery2d,gres2d,gid2d,N,count); cudaMemcpy(id2d,gid2d,m*N*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(res2d,gres2d,m*N*sizeof(long),cudaMemcpyDeviceToHost); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("Total time taken %f\n",milliseconds); for(int i=0;i<m;i++){ //cout<<"Line"<<i<<"\t"; long *dist = (long *)malloc(N*sizeof(long)); int *im = (int *)malloc(N*sizeof(int)); for(int j=0;j<N;j++){ //cout<<res2d[i*N+j]<<"\t"; im[j] = id2d[i*N+j]%N; dist[j] = res2d[i*N+j]; } thrust::sort_by_key(dist, dist + N, im); int count1,count2; count1 = count2 = 0; for(int j=0;j<k;j++){ //cout<<im[j]<<"\t"; if(data[im[j]*count+10]==2){ count1++; } if(data[im[j]*count+10]==4){ count2++; } } if(count1>count2){ result[i] = 2; } else{ result[i] = 4; } //cout<<result[i]<<"\n"; //cout<<count1<<" "<<count2<<"\n"; } int *ggcounter; int ccounter[1]; long *ggresult,*gggquery; cudaMalloc(&ggresult,m*sizeof(long)); cudaMalloc(&gggquery,m*count*sizeof(long)); ccounter[0] = 0; cudaMalloc(&ggcounter,1*sizeof(int)); cudaMemcpy(ggcounter,ccounter,1*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(gggquery,query,m*count*sizeof(long),cudaMemcpyHostToDevice); cudaMemcpy(ggresult,result,m*sizeof(long),cudaMemcpyHostToDevice); Accuracy<<<1,m>>>(gggquery,ggresult,count,ggcounter); cudaMemcpy(ccounter,ggcounter,1*sizeof(int),cudaMemcpyDeviceToHost); float acc1 = ccounter[0]*100; acc1 = acc1/m; cout<<"Accuracy of KNN "<<acc1<<"\n"; cudaDeviceSynchronize(); return 0; }
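// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original program): Accuracy is launched
// as <<<1, m>>>, which caps m at a single block of threads. A grid-stride,
// guarded variant counts matches for any m with the same atomicAdd on one
// device counter. AccuracyGuarded is a hypothetical name for this example;
// the class label is still hard-coded in column 10, as in the original.
// ---------------------------------------------------------------------------
__global__ void AccuracyGuarded(const long *query, const long *result,
                                int count, int m, int *counter)
{
    for (int id = blockIdx.x * blockDim.x + threadIdx.x;
         id < m;
         id += gridDim.x * blockDim.x) {      // grid-stride loop over test points
        if (query[id*count + 10] == result[id])
            atomicAdd(counter, 1);            // one global counter, contended but simple
    }
}
// usage (hypothetical block size):
//   int threads = 256, blocks = (m + threads - 1) / threads;
//   AccuracyGuarded<<<blocks, threads>>>(gggquery, ggresult, count, m, ggcounter);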
f29e592c826d00093c7c65da587eb82ac0cf47a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Lab 6 - volume of union of spheres #include <stdlib.h> #include <iostream> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <cutil_math.h> #include "SimpleRNG.h" // spheres represented with x, y, z and posn and w as radius // macro for error-checking CUDA calls #define CUDA_SAFE_CALL(x) do { hipError_t err = (x); \ if (err != hipSuccess) { \ printf("Error %d \"%s\" at %s:%u\n", err, hipGetErrorString(err), \ __FILE__, __LINE__); \ exit(-1); } } while (false) //////////////////////////////////////////////////////////////////////////////// // helper functions for this lab... typedef struct { double xmin, xmax, ymin, ymax, zmin, zmax; double xrange, yrange, zrange; double volume; } BoundBox; // find the bounding box for a set of spheres void FindBoundingBox(float4* spheres, int numSpheres, BoundBox& box) { box.xmin = box.xmax = spheres[0].x; box.ymin = box.ymax = spheres[0].y; box.zmin = box.zmax = spheres[0].z; for (int x = 0; x < numSpheres; x++) { if (box.xmin > spheres[x].x - spheres[x].w) box.xmin = spheres[x].x - spheres[x].w; if (box.ymin > spheres[x].y - spheres[x].w) box.ymin = spheres[x].y - spheres[x].w; if (box.zmin > spheres[x].z - spheres[x].w) box.zmin = spheres[x].z - spheres[x].w; if (box.xmax < spheres[x].x + spheres[x].w) box.xmax = spheres[x].x + spheres[x].w; if (box.ymax < spheres[x].y + spheres[x].w) box.ymax = spheres[x].y + spheres[x].w; if (box.zmax < spheres[x].z + spheres[x].w) box.zmax = spheres[x].z + spheres[x].w; } box.xrange = box.xmax - box.xmin; box.yrange = box.ymax - box.ymin; box.zrange = box.zmax - box.zmin; box.volume = box.xrange * box.yrange * box.zrange; } // return the current time, in seconds double now() { struct timeval tv; gettimeofday(&tv, 0); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000; } // generate a "random" seed based on time long long random_seed() { struct timeval tv; gettimeofday(&tv, 0); return tv.tv_sec + tv.tv_usec; } // check if a point is inside a sphere __device__ __host__ bool PointInSphere(float3& pt, float4& sphere) { return length(pt - make_float3(sphere)) < sphere.w; } SimpleRNG rng; //////////////////////////////////////////////////////////////////////////////// // kernels // inputs: // spheres, numSpheres - describe the array of spheres // points - points to check against spheres; coordinates are in [0, 1]^3 // doubleResults, intResults - arrays of doubles and floats to write results // to. either can be NULL, in which case results aren't written to them // box - bounding box to scale points into // total number of threads must be equal to the number of points __global__ void CheckPointsK(float4* spheres, int numSpheres, float3* points, double* doubleResults, unsigned int* intResults, BoundBox box) { // : check if the point is inside any sphere. if so, set the appropriate // entry in doubleResults and intResults to 1 (if non-NULL). //Decide whether we want to write to doubleResults or intResults int doubleRun; if (doubleResults == NULL) { doubleRun = 0; } else { doubleRun = 1; } extern __shared__ float4 sphereLocs[]; int blockId = blockIdx.x; int blockD = blockDim.x; int threadId = threadIdx.x; int globalId = blockD*blockId + threadId; //copy the global spheres to shared memory, but only using as many threads as we need to (i.e. 
numSpheres many) //: Require that numSpheres be no larger than numThreads (per block) if (threadId <= numSpheres) { sphereLocs[threadId] = spheres[threadId]; } __syncthreads(); //read in the point once from global memory (using its global id) and loop over all the spheres to see if in the collective volume float3 oldPoint = points[globalId]; float3 point = make_float3(oldPoint.x*box.xrange+box.xmin, oldPoint.y*box.yrange+box.ymin, oldPoint.z*box.zrange+box.zmin); //Compare doubleRun outside of the loop so we only have do it once if (doubleRun == 1) { doubleResults[globalId] = 0.; for (int j = 0; j < numSpheres; j++) { if (PointInSphere(point, sphereLocs[j])) { doubleResults[globalId] = 1.; break; } } } else { intResults[globalId] = 0; for (int j = 0; j < numSpheres; j++) { if (PointInSphere(point, sphereLocs[j])) { intResults[globalId] = 1; break; } } } } // generates 'count' random float3s using CURAND // only requires the total number of threads to be a factor of 'count' // ex. can call as such: GenerateRandom3K<<< 3, 8 >>>(..., 72) __global__ void GenerateRandom3K(float3* toWrite, long long seed, hiprandState_t* states, int count) { int index = blockDim.x * blockIdx.x + threadIdx.x; // : initialize random generator states, then generate random float3s in // [0, 1]^3 float a, b, c; //hiprandState_t *localState = &states[0]; hiprand_init(seed, index, 0, &states[index]); //hiprand_init(seed, 0, 0, states); for (int x = index; x < count; x += blockDim.x * gridDim.x) { a = hiprand_uniform(&states[index]); b = hiprand_uniform(&states[index]); c = hiprand_uniform(&states[index]); toWrite[x] = make_float3(a, b, c); } } // : add a reduction kernel to sum an array of unsigned ints, for VolumeCUDA __global__ void reduce(unsigned int *g_idata) { extern __shared__ int sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s = blockDim.x/2; s > 0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_idata[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // host code // find volume on CPU double VolumeCPU(float4* spheres, int numSpheres, int numPts, BoundBox& box) { int x, y, numPtsInside = 0; for (x = 0; x < numPts; x++) { float3 pt = make_float3(rng.GetUniform() * box.xrange + box.xmin, rng.GetUniform() * box.yrange + box.ymin, rng.GetUniform() * box.zrange + box.zmin); for (y = 0; y < numSpheres; y++) if (PointInSphere(pt, spheres[y])) break; if (y < numSpheres) numPtsInside++; } return (double)numPtsInside / (double)numPts * box.volume; } // find volume on GPU, summing using CUBLAS double VolumeCUBLAS(float4* d_spheres, int numSpheres, int numPts, BoundBox& box) { //double vol = 0.0; const int numThreads = 512; //: Indicate on README that we want numPts divisible by 512 dim3 grid (numPts / numThreads, 1, 1); dim3 block (numThreads, 1, 1); // : // 1. allocate memory for needed data // 2. generate random points on GPU in [0, 1]^3 using CURAND host API // 3. check if each point is within any sphere // 4. count points using CUBLAS // 5. 
free memory on GPU //STEP 1 unsigned int spheresSize = numSpheres * sizeof(float4); unsigned int pointsSize = numPts * sizeof(float3); unsigned int doubleResultsSize = numPts * sizeof(double); float3* points; double* doubleResults; unsigned int* intResults = 0; CUDA_SAFE_CALL(hipMalloc((void**)&points, pointsSize)); CUDA_SAFE_CALL(hipMalloc((void**)&doubleResults, doubleResultsSize)); //STEP 2 hiprandGenerator_t r; hiprandCreateGenerator(&r, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(r, (long long)random_seed); hiprandGenerateUniform(r, (float*)points, numPts*3); hiprandDestroyGenerator(r); //STEP 3 hipLaunchKernelGGL(( CheckPointsK), dim3(grid), dim3(block), spheresSize , 0, d_spheres, numSpheres, points, doubleResults, intResults, box); //STEP 4 double result; result = hipblasDasum(numPts, doubleResults, 1); //STEP 5 CUDA_SAFE_CALL(hipFree(points)); CUDA_SAFE_CALL(hipFree(doubleResults)); return (result / numPts) * (double) box.volume; } // find volume on GPU, summing using reduction kernel double VolumeCUDA(float4* d_spheres, int numSpheres, int numPts, BoundBox& box) { //double vol = 0.0; const int numThreads = 512; //: Indicate on README that we want numPts divisible by 512 dim3 grid (numPts / numThreads, 1, 1); dim3 block (numThreads, 1, 1); // : // 1. allocate memory for needed data (including random generator states) // 2. generate random points on GPU in [0, 1]^3 using CURAND device API // 3. check if each point is within any sphere // 4. count points using reduction kernel // 5. free memory on GPU //STEP 1 unsigned int spheresSize = numSpheres * sizeof(float4); unsigned int pointsSize = numPts * sizeof(float3); unsigned int intResultsSize = numPts * sizeof(int); float3* points; double* doubleResults = 0; unsigned int* intResults; hiprandState_t* devStates; CUDA_SAFE_CALL(hipMalloc((void**)&devStates, numThreads * sizeof(hiprandState_t))); CUDA_SAFE_CALL(hipMalloc((void**)&points, pointsSize)); //STEP 2 dim3 randomGrid (1, 1, 1); dim3 randomBlock (numThreads, 1, 1); hipLaunchKernelGGL(( GenerateRandom3K), dim3(randomGrid), dim3(randomBlock) , 0, 0, points, random_seed(), devStates, numPts); CUDA_SAFE_CALL(hipFree(devStates)); //STEP 3 CUDA_SAFE_CALL(hipMalloc((void**)&intResults, intResultsSize)); hipLaunchKernelGGL(( CheckPointsK), dim3(grid), dim3(block), spheresSize , 0, d_spheres, numSpheres, points, doubleResults, intResults, box); CUDA_SAFE_CALL(hipFree(points)); //STEP 4 int numReduceThreads = 512; //the factor by which to reduce the number of points by int dim; for (dim = numPts; dim >= numReduceThreads; dim/= numReduceThreads) { dim3 reductionBlock (numReduceThreads, 1, 1); dim3 reductionGrid (dim / numReduceThreads, 1, 1); unsigned int sharedMemSize = numReduceThreads*sizeof(int); hipLaunchKernelGGL(( reduce), dim3(reductionGrid), dim3(reductionBlock), sharedMemSize , 0, intResults); } numReduceThreads = dim; dim3 reductionBlock (numReduceThreads, 1, 1); dim3 reductionGrid (dim / numReduceThreads, 1, 1); unsigned int sharedMemSize = numReduceThreads*sizeof(int); hipLaunchKernelGGL(( reduce), dim3(reductionGrid), dim3(reductionBlock), sharedMemSize , 0, intResults); //STEP 5 unsigned int result; CUDA_SAFE_CALL(hipMemcpy(&result, &intResults[0], sizeof(int), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(intResults)); return ((double)result / (double)numPts) * (double) box.volume; } //////////////////////////////////////////////////////////////////////////////// // main program void RunVolume(const char* name, double (*Vol)(float4*, int, int, 
BoundBox&), float4* spheres, int numSpheres, int numPts, BoundBox& box) { printf("find volume (%s)...\n", name); double start_time = now(); double volume = Vol(spheres, numSpheres, numPts, box); double end_time = now(); printf(" volume: %g\n", volume); printf(" time: %g sec\n", end_time - start_time); } int main(int argc, char** argv) { // seed the CPU random generator rng.SetState(random_seed(), random_seed()); // set program parameters and allocate memory for spheres printf("generate spheres...\n"); int numPts = 1024 * 1024 * 16; int numSpheres = 100; float4* spheres = (float4*)malloc(numPts * sizeof(float4)); if (!spheres) { printf("failed to allocate memory for spheres\n"); return -1; } // generate random spheres centered in [0, 10]^3 double totalVolume = 0.0f; for (int x = 0; x < numSpheres; x++) { spheres[x].x = rng.GetUniform() * 10.0f; spheres[x].y = rng.GetUniform() * 10.0f; spheres[x].z = rng.GetUniform() * 10.0f; spheres[x].w = rng.GetUniform() + 1.0f; totalVolume += (4.0f * spheres[x].w * spheres[x].w * spheres[x].w * M_PI / 3.0f); // uncomment to print spheres //printf(" sphere: (%g, %g, %g) with r = %g\n", spheres[x].x, spheres[x].y, // spheres[x].z, spheres[x].w); } printf(" number of spheres: %u\n", numSpheres); printf(" non-union volume: %g\n", totalVolume); printf(" number of points: %u\n", numPts); // find bounding box of spheres printf("find bounds rect...\n"); BoundBox box; FindBoundingBox(spheres, numSpheres, box); printf(" boundsrect: [%g, %g] x [%g, %g] x [%g, %g]\n", box.xmin, box.xmax, box.ymin, box.ymax, box.zmin, box.zmax); printf(" boundsrange: %g, %g, %g (volume %g)\n", box.xrange, box.yrange, box.zrange, box.volume); // init cublas and allocate memory on the GPU printf("initialize GPU...\n"); hipblasInit(); float4* d_spheres; CUDA_SAFE_CALL(hipMalloc(&d_spheres, numSpheres * sizeof(float4))); // copy the spheres to the GPU hipMemcpy(d_spheres, spheres, numSpheres * sizeof(float4), hipMemcpyHostToDevice); // run CPU version RunVolume("CPU", VolumeCPU, spheres, numSpheres, numPts, box); RunVolume("CUBLAS", VolumeCUBLAS, d_spheres, numSpheres, numPts, box); RunVolume("no CUBLAS", VolumeCUDA, d_spheres, numSpheres, numPts, box); // get rid of stuff in memory printf("clean up...\n"); CUDA_SAFE_CALL(hipFree(d_spheres)); hipblasShutdown(); hipDeviceReset(); return 0; }
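// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original lab code): all three variants
// above estimate the union volume as (hits / numPts) * box.volume. The helper
// below also reports the 1-sigma Monte Carlo error of that estimate,
// box.volume * sqrt(p*(1-p)/numPts), which the lab code does not compute;
// mc_volume_estimate is a hypothetical name used only for this example.
// ---------------------------------------------------------------------------
static void mc_volume_estimate(long long hits, long long numPts,
                               const BoundBox& box,
                               double* volume, double* stderr_out)
{
    double p = (double)hits / (double)numPts;   // fraction of sample points inside some sphere
    *volume     = p * box.volume;               // same estimator as VolumeCPU/VolumeCUBLAS/VolumeCUDA
    *stderr_out = box.volume * sqrt(p * (1.0 - p) / (double)numPts);
}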
f29e592c826d00093c7c65da587eb82ac0cf47a5.cu
// Lab 6 - volume of union of spheres #include <stdlib.h> #include <iostream> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <cublas.h> #include <curand.h> #include <curand_kernel.h> #include <cutil_math.h> #include "SimpleRNG.h" // spheres represented with x, y, z and posn and w as radius // macro for error-checking CUDA calls #define CUDA_SAFE_CALL(x) do { cudaError_t err = (x); \ if (err != cudaSuccess) { \ printf("Error %d \"%s\" at %s:%u\n", err, cudaGetErrorString(err), \ __FILE__, __LINE__); \ exit(-1); } } while (false) //////////////////////////////////////////////////////////////////////////////// // helper functions for this lab... typedef struct { double xmin, xmax, ymin, ymax, zmin, zmax; double xrange, yrange, zrange; double volume; } BoundBox; // find the bounding box for a set of spheres void FindBoundingBox(float4* spheres, int numSpheres, BoundBox& box) { box.xmin = box.xmax = spheres[0].x; box.ymin = box.ymax = spheres[0].y; box.zmin = box.zmax = spheres[0].z; for (int x = 0; x < numSpheres; x++) { if (box.xmin > spheres[x].x - spheres[x].w) box.xmin = spheres[x].x - spheres[x].w; if (box.ymin > spheres[x].y - spheres[x].w) box.ymin = spheres[x].y - spheres[x].w; if (box.zmin > spheres[x].z - spheres[x].w) box.zmin = spheres[x].z - spheres[x].w; if (box.xmax < spheres[x].x + spheres[x].w) box.xmax = spheres[x].x + spheres[x].w; if (box.ymax < spheres[x].y + spheres[x].w) box.ymax = spheres[x].y + spheres[x].w; if (box.zmax < spheres[x].z + spheres[x].w) box.zmax = spheres[x].z + spheres[x].w; } box.xrange = box.xmax - box.xmin; box.yrange = box.ymax - box.ymin; box.zrange = box.zmax - box.zmin; box.volume = box.xrange * box.yrange * box.zrange; } // return the current time, in seconds double now() { struct timeval tv; gettimeofday(&tv, 0); return (double)tv.tv_sec + (double)tv.tv_usec / 1000000; } // generate a "random" seed based on time long long random_seed() { struct timeval tv; gettimeofday(&tv, 0); return tv.tv_sec + tv.tv_usec; } // check if a point is inside a sphere __device__ __host__ bool PointInSphere(float3& pt, float4& sphere) { return length(pt - make_float3(sphere)) < sphere.w; } SimpleRNG rng; //////////////////////////////////////////////////////////////////////////////// // kernels // inputs: // spheres, numSpheres - describe the array of spheres // points - points to check against spheres; coordinates are in [0, 1]^3 // doubleResults, intResults - arrays of doubles and floats to write results // to. either can be NULL, in which case results aren't written to them // box - bounding box to scale points into // total number of threads must be equal to the number of points __global__ void CheckPointsK(float4* spheres, int numSpheres, float3* points, double* doubleResults, unsigned int* intResults, BoundBox box) { // : check if the point is inside any sphere. if so, set the appropriate // entry in doubleResults and intResults to 1 (if non-NULL). //Decide whether we want to write to doubleResults or intResults int doubleRun; if (doubleResults == NULL) { doubleRun = 0; } else { doubleRun = 1; } extern __shared__ float4 sphereLocs[]; int blockId = blockIdx.x; int blockD = blockDim.x; int threadId = threadIdx.x; int globalId = blockD*blockId + threadId; //copy the global spheres to shared memory, but only using as many threads as we need to (i.e. 
numSpheres many) //: Require that numSpheres be no larger than numThreads (per block) if (threadId <= numSpheres) { sphereLocs[threadId] = spheres[threadId]; } __syncthreads(); //read in the point once from global memory (using its global id) and loop over all the spheres to see if in the collective volume float3 oldPoint = points[globalId]; float3 point = make_float3(oldPoint.x*box.xrange+box.xmin, oldPoint.y*box.yrange+box.ymin, oldPoint.z*box.zrange+box.zmin); //Compare doubleRun outside of the loop so we only have do it once if (doubleRun == 1) { doubleResults[globalId] = 0.; for (int j = 0; j < numSpheres; j++) { if (PointInSphere(point, sphereLocs[j])) { doubleResults[globalId] = 1.; break; } } } else { intResults[globalId] = 0; for (int j = 0; j < numSpheres; j++) { if (PointInSphere(point, sphereLocs[j])) { intResults[globalId] = 1; break; } } } } // generates 'count' random float3s using CURAND // only requires the total number of threads to be a factor of 'count' // ex. can call as such: GenerateRandom3K<<< 3, 8 >>>(..., 72) __global__ void GenerateRandom3K(float3* toWrite, long long seed, curandState* states, int count) { int index = blockDim.x * blockIdx.x + threadIdx.x; // : initialize random generator states, then generate random float3s in // [0, 1]^3 float a, b, c; //curandState *localState = &states[0]; curand_init(seed, index, 0, &states[index]); //curand_init(seed, 0, 0, states); for (int x = index; x < count; x += blockDim.x * gridDim.x) { a = curand_uniform(&states[index]); b = curand_uniform(&states[index]); c = curand_uniform(&states[index]); toWrite[x] = make_float3(a, b, c); } } // : add a reduction kernel to sum an array of unsigned ints, for VolumeCUDA __global__ void reduce(unsigned int *g_idata) { extern __shared__ int sdata[]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for(unsigned int s = blockDim.x/2; s > 0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_idata[blockIdx.x] = sdata[0]; } //////////////////////////////////////////////////////////////////////////////// // host code // find volume on CPU double VolumeCPU(float4* spheres, int numSpheres, int numPts, BoundBox& box) { int x, y, numPtsInside = 0; for (x = 0; x < numPts; x++) { float3 pt = make_float3(rng.GetUniform() * box.xrange + box.xmin, rng.GetUniform() * box.yrange + box.ymin, rng.GetUniform() * box.zrange + box.zmin); for (y = 0; y < numSpheres; y++) if (PointInSphere(pt, spheres[y])) break; if (y < numSpheres) numPtsInside++; } return (double)numPtsInside / (double)numPts * box.volume; } // find volume on GPU, summing using CUBLAS double VolumeCUBLAS(float4* d_spheres, int numSpheres, int numPts, BoundBox& box) { //double vol = 0.0; const int numThreads = 512; //: Indicate on README that we want numPts divisible by 512 dim3 grid (numPts / numThreads, 1, 1); dim3 block (numThreads, 1, 1); // : // 1. allocate memory for needed data // 2. generate random points on GPU in [0, 1]^3 using CURAND host API // 3. check if each point is within any sphere // 4. count points using CUBLAS // 5. 
free memory on GPU //STEP 1 unsigned int spheresSize = numSpheres * sizeof(float4); unsigned int pointsSize = numPts * sizeof(float3); unsigned int doubleResultsSize = numPts * sizeof(double); float3* points; double* doubleResults; unsigned int* intResults = 0; CUDA_SAFE_CALL(cudaMalloc((void**)&points, pointsSize)); CUDA_SAFE_CALL(cudaMalloc((void**)&doubleResults, doubleResultsSize)); //STEP 2 curandGenerator_t r; curandCreateGenerator(&r, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(r, (long long)random_seed); curandGenerateUniform(r, (float*)points, numPts*3); curandDestroyGenerator(r); //STEP 3 CheckPointsK<<< grid, block, spheresSize >>> (d_spheres, numSpheres, points, doubleResults, intResults, box); //STEP 4 double result; result = cublasDasum(numPts, doubleResults, 1); //STEP 5 CUDA_SAFE_CALL(cudaFree(points)); CUDA_SAFE_CALL(cudaFree(doubleResults)); return (result / numPts) * (double) box.volume; } // find volume on GPU, summing using reduction kernel double VolumeCUDA(float4* d_spheres, int numSpheres, int numPts, BoundBox& box) { //double vol = 0.0; const int numThreads = 512; //: Indicate on README that we want numPts divisible by 512 dim3 grid (numPts / numThreads, 1, 1); dim3 block (numThreads, 1, 1); // : // 1. allocate memory for needed data (including random generator states) // 2. generate random points on GPU in [0, 1]^3 using CURAND device API // 3. check if each point is within any sphere // 4. count points using reduction kernel // 5. free memory on GPU //STEP 1 unsigned int spheresSize = numSpheres * sizeof(float4); unsigned int pointsSize = numPts * sizeof(float3); unsigned int intResultsSize = numPts * sizeof(int); float3* points; double* doubleResults = 0; unsigned int* intResults; curandState* devStates; CUDA_SAFE_CALL(cudaMalloc((void**)&devStates, numThreads * sizeof(curandState))); CUDA_SAFE_CALL(cudaMalloc((void**)&points, pointsSize)); //STEP 2 dim3 randomGrid (1, 1, 1); dim3 randomBlock (numThreads, 1, 1); GenerateRandom3K<<< randomGrid, randomBlock >>> (points, random_seed(), devStates, numPts); CUDA_SAFE_CALL(cudaFree(devStates)); //STEP 3 CUDA_SAFE_CALL(cudaMalloc((void**)&intResults, intResultsSize)); CheckPointsK<<< grid, block, spheresSize >>> (d_spheres, numSpheres, points, doubleResults, intResults, box); CUDA_SAFE_CALL(cudaFree(points)); //STEP 4 int numReduceThreads = 512; //the factor by which to reduce the number of points by int dim; for (dim = numPts; dim >= numReduceThreads; dim/= numReduceThreads) { dim3 reductionBlock (numReduceThreads, 1, 1); dim3 reductionGrid (dim / numReduceThreads, 1, 1); unsigned int sharedMemSize = numReduceThreads*sizeof(int); reduce<<< reductionGrid, reductionBlock, sharedMemSize >>> (intResults); } numReduceThreads = dim; dim3 reductionBlock (numReduceThreads, 1, 1); dim3 reductionGrid (dim / numReduceThreads, 1, 1); unsigned int sharedMemSize = numReduceThreads*sizeof(int); reduce<<< reductionGrid, reductionBlock, sharedMemSize >>> (intResults); //STEP 5 unsigned int result; CUDA_SAFE_CALL(cudaMemcpy(&result, &intResults[0], sizeof(int), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(intResults)); return ((double)result / (double)numPts) * (double) box.volume; } //////////////////////////////////////////////////////////////////////////////// // main program void RunVolume(const char* name, double (*Vol)(float4*, int, int, BoundBox&), float4* spheres, int numSpheres, int numPts, BoundBox& box) { printf("find volume (%s)...\n", name); double start_time = now(); double volume = Vol(spheres, 
numSpheres, numPts, box); double end_time = now(); printf(" volume: %g\n", volume); printf(" time: %g sec\n", end_time - start_time); } int main(int argc, char** argv) { // seed the CPU random generator rng.SetState(random_seed(), random_seed()); // set program parameters and allocate memory for spheres printf("generate spheres...\n"); int numPts = 1024 * 1024 * 16; int numSpheres = 100; float4* spheres = (float4*)malloc(numPts * sizeof(float4)); if (!spheres) { printf("failed to allocate memory for spheres\n"); return -1; } // generate random spheres centered in [0, 10]^3 double totalVolume = 0.0f; for (int x = 0; x < numSpheres; x++) { spheres[x].x = rng.GetUniform() * 10.0f; spheres[x].y = rng.GetUniform() * 10.0f; spheres[x].z = rng.GetUniform() * 10.0f; spheres[x].w = rng.GetUniform() + 1.0f; totalVolume += (4.0f * spheres[x].w * spheres[x].w * spheres[x].w * M_PI / 3.0f); // uncomment to print spheres //printf(" sphere: (%g, %g, %g) with r = %g\n", spheres[x].x, spheres[x].y, // spheres[x].z, spheres[x].w); } printf(" number of spheres: %u\n", numSpheres); printf(" non-union volume: %g\n", totalVolume); printf(" number of points: %u\n", numPts); // find bounding box of spheres printf("find bounds rect...\n"); BoundBox box; FindBoundingBox(spheres, numSpheres, box); printf(" boundsrect: [%g, %g] x [%g, %g] x [%g, %g]\n", box.xmin, box.xmax, box.ymin, box.ymax, box.zmin, box.zmax); printf(" boundsrange: %g, %g, %g (volume %g)\n", box.xrange, box.yrange, box.zrange, box.volume); // init cublas and allocate memory on the GPU printf("initialize GPU...\n"); cublasInit(); float4* d_spheres; CUDA_SAFE_CALL(cudaMalloc(&d_spheres, numSpheres * sizeof(float4))); // copy the spheres to the GPU cudaMemcpy(d_spheres, spheres, numSpheres * sizeof(float4), cudaMemcpyHostToDevice); // run CPU version RunVolume("CPU", VolumeCPU, spheres, numSpheres, numPts, box); RunVolume("CUBLAS", VolumeCUBLAS, d_spheres, numSpheres, numPts, box); RunVolume("no CUBLAS", VolumeCUDA, d_spheres, numSpheres, numPts, box); // get rid of stuff in memory printf("clean up...\n"); CUDA_SAFE_CALL(cudaFree(d_spheres)); cublasShutdown(); cudaThreadExit(); return 0; }
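// Note: VolumeCUBLAS above passes "(long long)random_seed" as the CURAND seed,
// i.e. it casts the function pointer instead of calling random_seed(), so the
// generator is seeded with the same value on every run. A minimal sketch of the
// intended host-API pattern (illustrative helper name; error checks omitted).
// (Also, CheckPointsK's "threadId <= numSpheres" copies one element past the end
// of the sphere array; "<" is presumably intended.)
#include <curand.h>
static void FillUniform01(float* d_out, size_t count, unsigned long long seed) {
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, seed);   // e.g. seed = random_seed()
    curandGenerateUniform(gen, d_out, count);        // count floats in (0, 1]
    curandDestroyGenerator(gen);
}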
e01647adcd77675b2bbc59f56f10cd5e2be0866a.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

__global__ void add(int *c, int *a, int *b)
{
    *c = *a + *b;
}

int main(int argc, char* argv[])
{
    int a, b, c;
    int *da, *db, *dc;

    hipMalloc((void**) &da, sizeof(int));
    hipMalloc((void**) &db, sizeof(int));
    hipMalloc((void**) &dc, sizeof(int));

    scanf("%i %i", &a, &b);

    hipMemcpy(da, &a, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(db, &b, sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, dc, da, db);

    hipMemcpy(&c, dc, sizeof(int), hipMemcpyDeviceToHost);
    printf("c = %i\n", c);

    hipFree(da);
    hipFree(db);
    hipFree(dc);
    return 0;
}
e01647adcd77675b2bbc59f56f10cd5e2be0866a.cu
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void add(int *c, int *a, int *b)
{
    *c = *a + *b;
}

int main(int argc, char* argv[])
{
    int a, b, c;
    int *da, *db, *dc;

    cudaMalloc((void**) &da, sizeof(int));
    cudaMalloc((void**) &db, sizeof(int));
    cudaMalloc((void**) &dc, sizeof(int));

    scanf("%i %i", &a, &b);

    cudaMemcpy(da, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, &b, sizeof(int), cudaMemcpyHostToDevice);

    add<<<1, 1>>>(dc, da, db);

    cudaMemcpy(&c, dc, sizeof(int), cudaMemcpyDeviceToHost);
    printf("c = %i\n", c);

    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
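// The minimal example above checks no API or launch errors. A small checking
// macro in the spirit of the CUDA_SAFE_CALL used elsewhere in this collection
// (sketch; names are illustrative and can be adapted):
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define CHECK_CUDA(call) do {                                   \
    cudaError_t err_ = (call);                                  \
    if (err_ != cudaSuccess) {                                  \
        fprintf(stderr, "CUDA error %s at %s:%d\n",             \
                cudaGetErrorString(err_), __FILE__, __LINE__);  \
        exit(1);                                                \
    }                                                           \
} while (0)
// Usage: CHECK_CUDA(cudaMemcpy(da, &a, sizeof(int), cudaMemcpyHostToDevice));
// After a kernel launch: CHECK_CUDA(cudaGetLastError());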
780dea88d4237cbf7a0e7ca81601a7c2d89dbc32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> // Variables float* h_A; // host vectors float* h_C; float* d_A; // device vectors float* d_C; // Functions void RandomInit(float*, int); __global__ void FindMax(const float*, float*, int); // Host Code int main(){ // Settings // gid -> GPU device id (0, 1, ...) // err -> error message get from CUDA calls // N -> Length of an array // size -> memory size of the allocate array // sb -> memory size after handle by GPU // sm -> size of shared memory in each individual block // m -> the power of threadsPerBlock // threadsPerBlock, blocksPerGrid -> For launching kernel // // start, stop -> CUDA event timer // Intime -> Calculate the input time, allocate and move data in device memory // gputime -> Time spent in GPU only // Outime -> Time used to handle the rest of finding maximum // gputime_tot -> Time total spent // // max_value -> Maximum value inside this array, find by GPU int gid; hipError_t err; int N; int size, sb; int sm; int threadsPerBlock, blocksPerGrid; hipEvent_t start, stop; float Intime, gputime, Outime, gputime_tot, cputime; float max_value; float max_value_CPU; FILE *output; // Optimize block size and grid size , with array length N. N = 81920007; size = N * sizeof(float); // Select GPU device printf("Select the GPU with device ID: "); scanf("%d", &gid); err = hipSetDevice(gid); if (err != hipSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Set GPU with device ID = %d\n", gid); // Create the timer hipEventCreate(&start); hipEventCreate(&stop); output = fopen("optimize_result.txt", "a"); fprintf(output, "BlockSize GridSize GPUonly TotalGPU TotalCPU SpeedUp Check\n"); fclose(output); // m -> the power of the block size // g -> the power of the grid size for(int m = 1; m <= 10; m = m+1){ for(int g = 1; g <= 4; g = g+1){ Intime = 0.0; gputime = 0.0; Outime = 0.0; gputime_tot = 0.0; cputime = 0.0; max_value = -2.0; max_value_CPU = -2.0; threadsPerBlock = pow(2, m); blocksPerGrid = pow(10, g); printf("%4d %7d\n", threadsPerBlock, blocksPerGrid); // Allocate input array sb = blocksPerGrid * sizeof(float); h_A = (float*)malloc(size); h_C = (float*)malloc(sb); // Initialize input vectors RandomInit(h_A, N); // Start the timer: Record allocate memory and move data, from host to device hipEventRecord(start, 0); // Allocate the array in device memory hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_C, sb); // Copy array from host to device memory hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); // Stop the timer: Record allocate memory and move data, from host to device hipEventRecord(stop, 0); hipEventSynchronize(stop); // Calculate spend time: Record allocate memory and move data, from host to device hipEventElapsedTime(&Intime, start, stop); // start the timer hipEventRecord(start, 0); // Called the kernel sm = threadsPerBlock * sizeof(float); hipLaunchKernelGGL(( FindMax) , dim3(blocksPerGrid), dim3(threadsPerBlock), sm , 0, d_A, d_C, N); // stop the timer hipEventRecord(stop, 0); hipEventSynchronize(stop); // Calculate spend time: Matrix Addition calculation time hipEventElapsedTime(&gputime, start, stop); // start the timer hipEventRecord(start,0); // Copy result from device memory to host memory // h_C contains the result of each block in host memory hipMemcpy(h_C, d_C, sb, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_C); for(int i = 0; i < blocksPerGrid; i = i+1){ if(h_C[i] > max_value){ max_value = h_C[i]; } 
} // stop the timer hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime( &Outime, start, stop); gputime_tot = Intime + gputime + Outime; // start the timer hipEventRecord(start, 0); // Compute the reference solution for(int i = 0; i < N; i = i+1){ if(h_A[i] > max_value_CPU){ max_value_CPU = h_A[i]; } } // stop the timer hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&cputime, start, stop); free(h_A); free(h_C); // Write to file output = fopen("optimize_result.txt", "a"); fprintf(output, "%d %d %f %f %f %f ", threadsPerBlock, blocksPerGrid, gputime, gputime_tot, cputime, cputime/gputime_tot); fprintf(output, "%.23f\n", max_value - max_value_CPU); fclose(output); } } // Destroy the timer hipEventDestroy(start); hipEventDestroy(stop); // Reset the device hipDeviceReset(); return 0; } __global__ void FindMax(const float* A, float* C, int N){ extern __shared__ float cache[]; int i = blockDim.x * blockIdx.x + threadIdx.x; int cacheIndex = threadIdx.x; float max = -2.0; while (i < N) { if(A[i] > max){ max = A[i]; } i = i + blockDim.x * gridDim.x; } cache[cacheIndex] = max; __syncthreads(); // Perform parallel reduction, threadsPerBlock must be 2^m int ib = blockDim.x/2; while (ib != 0) { if(cacheIndex < ib){ if(cache[cacheIndex] < cache[cacheIndex + ib]){ cache[cacheIndex] = cache[cacheIndex + ib]; } } __syncthreads(); ib = ib / 2; } if(cacheIndex == 0){ C[blockIdx.x] = cache[0]; } } // Allocates an array with random float entries in (-1,1) void RandomInit(float* data, int n){ for (int i = 0; i < n; ++i) data[i] = 2.0*rand()/(float)RAND_MAX - 1.0; }
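// The host code above times each phase with the event idiom: record(start),
// do the work, record(stop), synchronize(stop), then read the elapsed time.
// The same idiom in isolation (CUDA spelling; the HIP file above uses the
// hipEvent* equivalents):
//
//   cudaEvent_t start, stop;
//   float ms = 0.0f;
//   cudaEventCreate(&start);  cudaEventCreate(&stop);
//   cudaEventRecord(start, 0);
//   /* ... kernel launch or memcpy being timed ... */
//   cudaEventRecord(stop, 0);
//   cudaEventSynchronize(stop);             // block until 'stop' has completed
//   cudaEventElapsedTime(&ms, start, stop); // milliseconds between the events
//   cudaEventDestroy(start);  cudaEventDestroy(stop);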
780dea88d4237cbf7a0e7ca81601a7c2d89dbc32.cu
#include <stdio.h> #include <stdlib.h> // Variables float* h_A; // host vectors float* h_C; float* d_A; // device vectors float* d_C; // Functions void RandomInit(float*, int); __global__ void FindMax(const float*, float*, int); // Host Code int main(){ // Settings // gid -> GPU device id (0, 1, ...) // err -> error message get from CUDA calls // N -> Length of an array // size -> memory size of the allocate array // sb -> memory size after handle by GPU // sm -> size of shared memory in each individual block // m -> the power of threadsPerBlock // threadsPerBlock, blocksPerGrid -> For launching kernel // // start, stop -> CUDA event timer // Intime -> Calculate the input time, allocate and move data in device memory // gputime -> Time spent in GPU only // Outime -> Time used to handle the rest of finding maximum // gputime_tot -> Time total spent // // max_value -> Maximum value inside this array, find by GPU int gid; cudaError_t err; int N; int size, sb; int sm; int threadsPerBlock, blocksPerGrid; cudaEvent_t start, stop; float Intime, gputime, Outime, gputime_tot, cputime; float max_value; float max_value_CPU; FILE *output; // Optimize block size and grid size , with array length N. N = 81920007; size = N * sizeof(float); // Select GPU device printf("Select the GPU with device ID: "); scanf("%d", &gid); err = cudaSetDevice(gid); if (err != cudaSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Set GPU with device ID = %d\n", gid); // Create the timer cudaEventCreate(&start); cudaEventCreate(&stop); output = fopen("optimize_result.txt", "a"); fprintf(output, "BlockSize GridSize GPUonly TotalGPU TotalCPU SpeedUp Check\n"); fclose(output); // m -> the power of the block size // g -> the power of the grid size for(int m = 1; m <= 10; m = m+1){ for(int g = 1; g <= 4; g = g+1){ Intime = 0.0; gputime = 0.0; Outime = 0.0; gputime_tot = 0.0; cputime = 0.0; max_value = -2.0; max_value_CPU = -2.0; threadsPerBlock = pow(2, m); blocksPerGrid = pow(10, g); printf("%4d %7d\n", threadsPerBlock, blocksPerGrid); // Allocate input array sb = blocksPerGrid * sizeof(float); h_A = (float*)malloc(size); h_C = (float*)malloc(sb); // Initialize input vectors RandomInit(h_A, N); // Start the timer: Record allocate memory and move data, from host to device cudaEventRecord(start, 0); // Allocate the array in device memory cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_C, sb); // Copy array from host to device memory cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); // Stop the timer: Record allocate memory and move data, from host to device cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Calculate spend time: Record allocate memory and move data, from host to device cudaEventElapsedTime(&Intime, start, stop); // start the timer cudaEventRecord(start, 0); // Called the kernel sm = threadsPerBlock * sizeof(float); FindMax <<< blocksPerGrid, threadsPerBlock, sm >>>(d_A, d_C, N); // stop the timer cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Calculate spend time: Matrix Addition calculation time cudaEventElapsedTime(&gputime, start, stop); // start the timer cudaEventRecord(start,0); // Copy result from device memory to host memory // h_C contains the result of each block in host memory cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_C); for(int i = 0; i < blocksPerGrid; i = i+1){ if(h_C[i] > max_value){ max_value = h_C[i]; } } // stop the timer cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime( 
&Outime, start, stop); gputime_tot = Intime + gputime + Outime; // start the timer cudaEventRecord(start, 0); // Compute the reference solution for(int i = 0; i < N; i = i+1){ if(h_A[i] > max_value_CPU){ max_value_CPU = h_A[i]; } } // stop the timer cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cputime, start, stop); free(h_A); free(h_C); // Write to file output = fopen("optimize_result.txt", "a"); fprintf(output, "%d %d %f %f %f %f ", threadsPerBlock, blocksPerGrid, gputime, gputime_tot, cputime, cputime/gputime_tot); fprintf(output, "%.23f\n", max_value - max_value_CPU); fclose(output); } } // Destroy the timer cudaEventDestroy(start); cudaEventDestroy(stop); // Reset the device cudaDeviceReset(); return 0; } __global__ void FindMax(const float* A, float* C, int N){ extern __shared__ float cache[]; int i = blockDim.x * blockIdx.x + threadIdx.x; int cacheIndex = threadIdx.x; float max = -2.0; while (i < N) { if(A[i] > max){ max = A[i]; } i = i + blockDim.x * gridDim.x; } cache[cacheIndex] = max; __syncthreads(); // Perform parallel reduction, threadsPerBlock must be 2^m int ib = blockDim.x/2; while (ib != 0) { if(cacheIndex < ib){ if(cache[cacheIndex] < cache[cacheIndex + ib]){ cache[cacheIndex] = cache[cacheIndex + ib]; } } __syncthreads(); ib = ib / 2; } if(cacheIndex == 0){ C[blockIdx.x] = cache[0]; } } // Allocates an array with random float entries in (-1,1) void RandomInit(float* data, int n){ for (int i = 0; i < n; ++i) data[i] = 2.0*rand()/(float)RAND_MAX - 1.0; }
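// The in-block reduction in FindMax requires blockDim.x to be a power of two,
// as the comment in the kernel notes. On newer GPUs the per-warp maximum can
// also be computed with shuffle intrinsics instead of shared memory; a sketch
// of that warp-level step (CUDA 9+ intrinsics, not part of the file above):
__device__ float warpMax(float v) {
    for (int offset = 16; offset > 0; offset >>= 1)
        v = fmaxf(v, __shfl_down_sync(0xffffffffu, v, offset));
    return v;   // lane 0 ends up holding the maximum over the 32 lanes
}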
689422c8ed2df48f16fad1fb24d5a899416f1f78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <hipsparse.h> #include "matrix/CSR.h" #include "matchingAggregation.h" #include "unsymMatching.cu" #include "hipcub/hipcub.hpp" #include "hipcub/hipcub.hpp" #include "hipcub/hipcub.hpp" #include "suitor.cu" #define MIN(a,b) (((a)<(b))?(a):(b)) namespace Matching{ CSR* toMaximumProductMatrix(CSR *AH); CSR* makeAH(CSR *A, vector<vtype> *w); vector<itype>* suitor(CSR *A, vector<vtype> *w); } //#################################################################################### // ensure the numeric symmetry in the CSR matrix __global__ void _write_T(itype n, vtype *val, itype *col, itype *row){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; for(int j=row[i]; j<row[i+1]; j++){ itype c = col[j]; if(i < c) break; vtype v = val[j]; for(int jj=row[c]; jj<row[c+1]; jj++){ if(col[jj] == i){ val[jj] = v; break; } } } } //#################################################################################### #if TYPE_WRITE_T == 0 __forceinline__ __device__ int binsearch(int array[], unsigned int size, int value) { unsigned int low, high, medium; low=0; high=size; while(low<high) { medium=(high+low)/2; if(value > array[medium]) { low=medium+1; } else { high=medium; } } return low; } //################################################################################################ __global__ void _write_T_warp(itype n, int MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row){ itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = tid / MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % MINI_WARP_SIZE; int mask_id = (tid % FULL_WARP) / MINI_WARP_SIZE; int warp_mask = getMaskByWarpID(MINI_WARP_SIZE, mask_id); vtype t; itype j_stop = A_row[warp+1]; for(int j=A_row[warp]+lane; j<j_stop; j+=MINI_WARP_SIZE){ itype c = A_col[j]; if(warp < c) break; int nc = A_row[c+1] - A_row[c]; int jj=binsearch(A_col+A_row[c], nc, warp); t=A_val[jj+A_row[c]]; A_val[j]=t; } } //################################################################################################ #elif TYPE_WRITE_T == 1 __global__ void _write_T_warp(itype n, int MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row){ itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = tid / MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % MINI_WARP_SIZE; int mask_id = (tid % FULL_WARP) / MINI_WARP_SIZE; int warp_mask = getMaskByWarpID(MINI_WARP_SIZE, mask_id); itype j_stop = A_row[warp+1]; for(int j=A_row[warp]+lane; j<j_stop; j+=MINI_WARP_SIZE){ itype c = A_col[j]; if(warp < c) break; vtype v = A_val[j]; for(int jj=A_row[c]; jj<A_row[c+1]; jj++){ if(A_col[jj] == warp){ A_val[jj] = v; break; } } } } #endif //#################################################################################### // kernel che costruisce preventivamente il vettore C = d * w^2 usato in _makeAH __global__ void _makeC(stype n, vtype *val, itype *col, itype *row, vtype *w, vtype *C){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype r = i; itype j_start = row[i]; itype j_stop = row[i+1]; int j; for(j=j_start; j<j_stop; j++){ itype c = col[j]; // if is a diagonal element if(c == r){ C[r] = val[j] * pow(w[r], 2); break; } } } //#################################################################################### __global__ void _makeC_warp(stype n, int MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row, vtype *w, vtype *C){ /* itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = tid / 
MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % MINI_WARP_SIZE; int mask_id = (tid % FULL_WARP) / MINI_WARP_SIZE; int warp_mask = getMaskByWarpID(MINI_WARP_SIZE, mask_id); itype j_start = A_row[warp]; itype j_stop = A_row[warp+1]; int j = j_start + lane, j_d; int j_d = WARP_SIZE, j; for(j = j_start+lane; ; j+=WARP_SIZE){ int is_diag = __ballot_sync(warp_mask, ( (j < j_stop) && (A_col[j] == warp) ) ) ; j_d = __clz(is_diag); if(j_d != WARP_SIZE) break; } if(lane == 0) t_nnz_4r[warp+1] = j - j_start + (WARP_SIZE - j_d) - 1; */ } __global__ void _makeAH_warp(stype n, int AH_MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row, vtype *w, vtype *C, vtype *AH_val, itype *AH_col, itype *AH_row){ itype tid = blockDim.x * blockIdx.x + threadIdx.x; itype warp = tid / AH_MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % AH_MINI_WARP_SIZE; itype j_stop = A_row[warp+1]; for(int j=A_row[warp]+lane; j<j_stop; j+=AH_MINI_WARP_SIZE){ itype c = A_col[j]; if(c != warp){ vtype a = A_val[j]; itype offset = c > warp ? warp + 1 : warp; AH_col[j - offset] = c; vtype norm = c > warp ? C[warp] + C[c] : C[c] + C[warp]; if(norm > DBL_EPSILON){ vtype w_temp = c > warp ? w[warp] * w[c] : w[c] * w[warp]; AH_val[j - offset] = 1. - ( (2. * a * w_temp) / norm); }else AH_val[j - offset] = DBL_EPSILON; } } if(lane == 0){ AH_row[warp+1] = j_stop - (warp + 1); } if(tid == 0){ // set the first index of the row pointer to 0 AH_row[0] = 0; } } //#################################################################################### //### original __global__ void _makeAH(stype n, vtype *val, itype *col, itype *row, vtype *w, vtype *C, vtype *AH_val, itype *AH_col, itype *AH_row){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype r = i; itype j_start = row[i]; itype j_stop = row[i+1]; int j; // prima della diagonale, ogni elemento e' non zero int offset = r; for(j=j_start; j<j_stop; j++){ itype c = col[j]; vtype a = val[j]; // abbiamo la diagonale nulla if(c == r){ // metti l'offset a 1 in modo tale che i valori e colonne succesive vengano scritte in AH nella cella precedente offset += 1; }else{ // salva colonna AH_col[j - offset] = c; vtype norm = c > r ? C[r] + C[c] : C[c] + C[r]; if(norm > DBL_EPSILON){ //AH_val[j - offset] = 1. - ( (2. * a * w[r] * w[c]) / norm); vtype w_temp = c > r ? w[r] * w[c] : w[c] * w[r]; AH_val[j - offset] = 1. - ( (2. 
* a * w_temp) / norm); }else AH_val[j - offset] = DBL_EPSILON; } } // salva il fine riga AH_row[r+1] = j_stop - (r + 1); if(i == 0){ // set the first index of the row pointer to 0 AH_row[0] = 0; } } // funzione che presa in input la matrice CSR A, alloca e costruisce la rispettiva matrice AH CSR* Matching::makeAH(CSR *A, vector<vtype> *w){ assert(A->on_the_device); assert(w->on_the_device); stype n; n = A->n; // init a vector on the device vector<vtype> *C = Vector::init<vtype>(n, true, true); int miniwarp_size = CSRm::choose_mini_warp_size(A); gridblock gb = gb1d(n, makeC_BLOCKSIZE, false); hipLaunchKernelGGL(( _makeC), dim3(gb.g), dim3(gb.b), 0, 0, n, A->val, A->col, A->row, w->val, C->val); CSR *AH = CSRm::init(A->n, A->m, (A->nnz - A->n), true, true, A->is_symmetric); gb = gb1d(n, makeAH_BLOCKSIZE, true, miniwarp_size); hipLaunchKernelGGL(( _makeAH_warp), dim3(gb.g), dim3(gb.b), 0, 0, n, miniwarp_size, A->val, A->col, A->row, w->val, C->val, AH->val, AH->col, AH->row); Vector::free<vtype>(C); return AH; } // Binary operation for the CUB::Reduce in the find_Max_Min function struct AbsMin { template <typename T> __device__ __forceinline__ T operator()(const T &lhs, const T &rhs) const { T ab_lhs = fabs(lhs); T ab_rhs = fabs(rhs); return ab_lhs < ab_rhs ? ab_lhs : ab_rhs; } }; //#################################################################################### // make the vector c needed in the _make_w kernel __global__ void _make_c(stype n, vtype *val, itype *row, vtype *c, vtype *alpha_candidate){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype j_start = row[i]; itype j_stop = row[i+1]; vtype max = 0.; vtype min = DBL_MAX; vtype a; int j; for(j=j_start; j<j_stop; j++){ a = log( fabs(val[j]) ); if(a > max) max = a; if(a < min) min = a; } c[i] = max; alpha_candidate[i] = max - min; } //#################################################################################### #if MAXIMUM_PRODUCT_MATRIX_OP == 0 // Modify the values of the matrix A_HAT in order to transforms the objective from a maximum weight to maximum weight maximum cardinality __global__ void _make_w(stype n, vtype *val, itype *col, itype *row, vtype *alpha, vtype *C){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype j_start = row[i]; itype j_stop = row[i+1]; vtype alpha_def = *alpha; int j; for(j=j_start; j<j_stop; j++){ itype c = col[j]; vtype a = val[j]; val[j] = alpha_def + log( fabs(a) ) + (alpha_def - C[c]); } } #else // Modify the values of the matrix A_HAT in order to transforms the objective from a maximum weight to maximum weight maximum cardinality __global__ void _make_w(stype nnz, vtype *val, vtype *min){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= nnz) return; val[i] = log( fabs(val[i]) / (0.999 * (*min)) ); } #endif //#################################################################################### // find the max (op_type==0) or the absolute min (op_type==1) in the input device array (with CUB utility) vtype* find_Max_Min(vtype *a, stype n, int op_type){ void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; vtype *min_max = NULL; hipError_t err; err = hipMalloc((void**)&min_max, sizeof(vtype) * 1); CHECK_DEVICE(err); if(op_type == 0){ hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, a, min_max, n); // Allocate temporary storage err = hipMalloc(&d_temp_storage, temp_storage_bytes); CHECK_DEVICE(err); // Run max-reduction hipcub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, a, min_max, n); }else if(op_type == 1){ 
AbsMin absmin; hipcub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, a, min_max, n, absmin, DBL_MAX); // Allocate temporary storage err = hipMalloc(&d_temp_storage, temp_storage_bytes); CHECK_DEVICE(err); // Run max-reduction hipcub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, a, min_max, n, absmin, DBL_MAX); } err = hipFree(d_temp_storage); CHECK_DEVICE(err); return min_max; } //#################################################################################### // Funzione che prende in input la matrice AH ne modifica i valore (inplace) per ottenere la matrice dei pesi W per il Maximum Product Matching CSR* Matching::toMaximumProductMatrix(CSR *AH){ assert(AH->on_the_device); #if MAXIMUM_PRODUCT_MATRIX_OP == 0 stype n; n = AH->n; vector<vtype> *c = Vector::init<vtype>(n, true, true); vector<vtype> *alpha_candidate = Vector::init<vtype>(n, true, true); gridblock gb = gb1d(n, make_c_BLOCKSIZE, false); hipLaunchKernelGGL(( _make_c), dim3(gb.g), dim3(gb.b), 0, 0, n, AH->val, AH->row, c->val, alpha_candidate->val); // find alpha in alpha_candidate vtype *alpha = find_Max_Min(alpha_candidate->val, n, 0); Vector::free<vtype>(alpha_candidate); gb = gb1d(n, make_w_BLOCKSIZE, false); hipLaunchKernelGGL(( _make_w), dim3(gb.g), dim3(gb.b), 0, 0, n, AH->val, AH->col, AH->row, alpha, c->val); Vector::free<vtype>(c); CHECK_DEVICE( hipFree(alpha) ); #else // do W_data[j]=log(fabs(B_data[j])/(0.999*min_B)); stype nnz = AH->nnz; // find the min value vtype *min = find_Max_Min(AH->val, nnz, 1); gridblock gb = gb1d(nnz, make_w_BLOCKSIZE, false); hipLaunchKernelGGL(( _make_w), dim3(gb.g), dim3(gb.b), 0, 0, nnz, AH->val, min); CHECK_DEVICE( hipFree(min) ); #endif return AH; } //#################################################################################### vector<itype>* Matching::suitor(CSR *A, vector<vtype> *w){ assert(A->on_the_device && w->on_the_device); #if MATCHING_MOD_TYPE == 1 int miniwarp_size = CSRm::choose_mini_warp_size(A); gridblock gb = gb1d(A->n, write_T_BLOCKSIZE, true, miniwarp_size); hipLaunchKernelGGL(( _write_T_warp), dim3(gb.g), dim3(gb.b), 0, 0, A->n, miniwarp_size, A->val, A->col, A->row); #endif CSR *AH = Matching::makeAH(A, w); CSR *W = Matching::toMaximumProductMatrix(AH); vector<itype> *M = matchingAggregationContext::M_buffer; approx_match_gpu_suitor(W, M, matchingAggregationContext::ws_buffer, matchingAggregationContext::mutex_buffer); //std::cout << "CPU matching\n"; //M = approx_match_cpu_suitor<vtype>(W); #if MATCHING_MOD_TYPE == 0 M = unsymFix(M); #endif // W is an alias of AH CSRm::free(W); return M; }
689422c8ed2df48f16fad1fb24d5a899416f1f78.cu
#pragma once #include <cusparse.h> #include "matrix/CSR.h" #include "matchingAggregation.h" #include "unsymMatching.cu" #include "cub/cub.cuh" #include "cub/util_allocator.cuh" #include "cub/device/device_reduce.cuh" #include "suitor.cu" #define MIN(a,b) (((a)<(b))?(a):(b)) namespace Matching{ CSR* toMaximumProductMatrix(CSR *AH); CSR* makeAH(CSR *A, vector<vtype> *w); vector<itype>* suitor(CSR *A, vector<vtype> *w); } //#################################################################################### // ensure the numeric symmetry in the CSR matrix __global__ void _write_T(itype n, vtype *val, itype *col, itype *row){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; for(int j=row[i]; j<row[i+1]; j++){ itype c = col[j]; if(i < c) break; vtype v = val[j]; for(int jj=row[c]; jj<row[c+1]; jj++){ if(col[jj] == i){ val[jj] = v; break; } } } } //#################################################################################### #if TYPE_WRITE_T == 0 __forceinline__ __device__ int binsearch(int array[], unsigned int size, int value) { unsigned int low, high, medium; low=0; high=size; while(low<high) { medium=(high+low)/2; if(value > array[medium]) { low=medium+1; } else { high=medium; } } return low; } //################################################################################################ __global__ void _write_T_warp(itype n, int MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row){ itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = tid / MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % MINI_WARP_SIZE; int mask_id = (tid % FULL_WARP) / MINI_WARP_SIZE; int warp_mask = getMaskByWarpID(MINI_WARP_SIZE, mask_id); vtype t; itype j_stop = A_row[warp+1]; for(int j=A_row[warp]+lane; j<j_stop; j+=MINI_WARP_SIZE){ itype c = A_col[j]; if(warp < c) break; int nc = A_row[c+1] - A_row[c]; int jj=binsearch(A_col+A_row[c], nc, warp); t=A_val[jj+A_row[c]]; A_val[j]=t; } } //################################################################################################ #elif TYPE_WRITE_T == 1 __global__ void _write_T_warp(itype n, int MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row){ itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = tid / MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % MINI_WARP_SIZE; int mask_id = (tid % FULL_WARP) / MINI_WARP_SIZE; int warp_mask = getMaskByWarpID(MINI_WARP_SIZE, mask_id); itype j_stop = A_row[warp+1]; for(int j=A_row[warp]+lane; j<j_stop; j+=MINI_WARP_SIZE){ itype c = A_col[j]; if(warp < c) break; vtype v = A_val[j]; for(int jj=A_row[c]; jj<A_row[c+1]; jj++){ if(A_col[jj] == warp){ A_val[jj] = v; break; } } } } #endif //#################################################################################### // kernel che costruisce preventivamente il vettore C = d * w^2 usato in _makeAH __global__ void _makeC(stype n, vtype *val, itype *col, itype *row, vtype *w, vtype *C){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype r = i; itype j_start = row[i]; itype j_stop = row[i+1]; int j; for(j=j_start; j<j_stop; j++){ itype c = col[j]; // if is a diagonal element if(c == r){ C[r] = val[j] * pow(w[r], 2); break; } } } //#################################################################################### __global__ void _makeC_warp(stype n, int MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row, vtype *w, vtype *C){ /* itype tid = blockDim.x * blockIdx.x + threadIdx.x; int warp = tid / MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % MINI_WARP_SIZE; int mask_id = (tid 
% FULL_WARP) / MINI_WARP_SIZE; int warp_mask = getMaskByWarpID(MINI_WARP_SIZE, mask_id); itype j_start = A_row[warp]; itype j_stop = A_row[warp+1]; int j = j_start + lane, j_d; int j_d = WARP_SIZE, j; for(j = j_start+lane; ; j+=WARP_SIZE){ int is_diag = __ballot_sync(warp_mask, ( (j < j_stop) && (A_col[j] == warp) ) ) ; j_d = __clz(is_diag); if(j_d != WARP_SIZE) break; } if(lane == 0) t_nnz_4r[warp+1] = j - j_start + (WARP_SIZE - j_d) - 1; */ } __global__ void _makeAH_warp(stype n, int AH_MINI_WARP_SIZE, vtype *A_val, itype *A_col, itype *A_row, vtype *w, vtype *C, vtype *AH_val, itype *AH_col, itype *AH_row){ itype tid = blockDim.x * blockIdx.x + threadIdx.x; itype warp = tid / AH_MINI_WARP_SIZE; if(warp >= n) return; int lane = tid % AH_MINI_WARP_SIZE; itype j_stop = A_row[warp+1]; for(int j=A_row[warp]+lane; j<j_stop; j+=AH_MINI_WARP_SIZE){ itype c = A_col[j]; if(c != warp){ vtype a = A_val[j]; itype offset = c > warp ? warp + 1 : warp; AH_col[j - offset] = c; vtype norm = c > warp ? C[warp] + C[c] : C[c] + C[warp]; if(norm > DBL_EPSILON){ vtype w_temp = c > warp ? w[warp] * w[c] : w[c] * w[warp]; AH_val[j - offset] = 1. - ( (2. * a * w_temp) / norm); }else AH_val[j - offset] = DBL_EPSILON; } } if(lane == 0){ AH_row[warp+1] = j_stop - (warp + 1); } if(tid == 0){ // set the first index of the row pointer to 0 AH_row[0] = 0; } } //#################################################################################### //### original __global__ void _makeAH(stype n, vtype *val, itype *col, itype *row, vtype *w, vtype *C, vtype *AH_val, itype *AH_col, itype *AH_row){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype r = i; itype j_start = row[i]; itype j_stop = row[i+1]; int j; // prima della diagonale, ogni elemento e' non zero int offset = r; for(j=j_start; j<j_stop; j++){ itype c = col[j]; vtype a = val[j]; // abbiamo la diagonale nulla if(c == r){ // metti l'offset a 1 in modo tale che i valori e colonne succesive vengano scritte in AH nella cella precedente offset += 1; }else{ // salva colonna AH_col[j - offset] = c; vtype norm = c > r ? C[r] + C[c] : C[c] + C[r]; if(norm > DBL_EPSILON){ //AH_val[j - offset] = 1. - ( (2. * a * w[r] * w[c]) / norm); vtype w_temp = c > r ? w[r] * w[c] : w[c] * w[r]; AH_val[j - offset] = 1. - ( (2. * a * w_temp) / norm); }else AH_val[j - offset] = DBL_EPSILON; } } // salva il fine riga AH_row[r+1] = j_stop - (r + 1); if(i == 0){ // set the first index of the row pointer to 0 AH_row[0] = 0; } } // funzione che presa in input la matrice CSR A, alloca e costruisce la rispettiva matrice AH CSR* Matching::makeAH(CSR *A, vector<vtype> *w){ assert(A->on_the_device); assert(w->on_the_device); stype n; n = A->n; // init a vector on the device vector<vtype> *C = Vector::init<vtype>(n, true, true); int miniwarp_size = CSRm::choose_mini_warp_size(A); gridblock gb = gb1d(n, makeC_BLOCKSIZE, false); _makeC<<<gb.g, gb.b>>>(n, A->val, A->col, A->row, w->val, C->val); CSR *AH = CSRm::init(A->n, A->m, (A->nnz - A->n), true, true, A->is_symmetric); gb = gb1d(n, makeAH_BLOCKSIZE, true, miniwarp_size); _makeAH_warp<<<gb.g, gb.b>>>(n, miniwarp_size, A->val, A->col, A->row, w->val, C->val, AH->val, AH->col, AH->row); Vector::free<vtype>(C); return AH; } // Binary operation for the CUB::Reduce in the find_Max_Min function struct AbsMin { template <typename T> __device__ __forceinline__ T operator()(const T &lhs, const T &rhs) const { T ab_lhs = fabs(lhs); T ab_rhs = fabs(rhs); return ab_lhs < ab_rhs ? 
ab_lhs : ab_rhs; } }; //#################################################################################### // make the vector c needed in the _make_w kernel __global__ void _make_c(stype n, vtype *val, itype *row, vtype *c, vtype *alpha_candidate){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype j_start = row[i]; itype j_stop = row[i+1]; vtype max = 0.; vtype min = DBL_MAX; vtype a; int j; for(j=j_start; j<j_stop; j++){ a = log( fabs(val[j]) ); if(a > max) max = a; if(a < min) min = a; } c[i] = max; alpha_candidate[i] = max - min; } //#################################################################################### #if MAXIMUM_PRODUCT_MATRIX_OP == 0 // Modify the values of the matrix A_HAT in order to transforms the objective from a maximum weight to maximum weight maximum cardinality __global__ void _make_w(stype n, vtype *val, itype *col, itype *row, vtype *alpha, vtype *C){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= n) return; itype j_start = row[i]; itype j_stop = row[i+1]; vtype alpha_def = *alpha; int j; for(j=j_start; j<j_stop; j++){ itype c = col[j]; vtype a = val[j]; val[j] = alpha_def + log( fabs(a) ) + (alpha_def - C[c]); } } #else // Modify the values of the matrix A_HAT in order to transforms the objective from a maximum weight to maximum weight maximum cardinality __global__ void _make_w(stype nnz, vtype *val, vtype *min){ stype i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= nnz) return; val[i] = log( fabs(val[i]) / (0.999 * (*min)) ); } #endif //#################################################################################### // find the max (op_type==0) or the absolute min (op_type==1) in the input device array (with CUB utility) vtype* find_Max_Min(vtype *a, stype n, int op_type){ void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; vtype *min_max = NULL; cudaError_t err; err = cudaMalloc((void**)&min_max, sizeof(vtype) * 1); CHECK_DEVICE(err); if(op_type == 0){ cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, a, min_max, n); // Allocate temporary storage err = cudaMalloc(&d_temp_storage, temp_storage_bytes); CHECK_DEVICE(err); // Run max-reduction cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, a, min_max, n); }else if(op_type == 1){ AbsMin absmin; cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, a, min_max, n, absmin, DBL_MAX); // Allocate temporary storage err = cudaMalloc(&d_temp_storage, temp_storage_bytes); CHECK_DEVICE(err); // Run max-reduction cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, a, min_max, n, absmin, DBL_MAX); } err = cudaFree(d_temp_storage); CHECK_DEVICE(err); return min_max; } //#################################################################################### // Funzione che prende in input la matrice AH ne modifica i valore (inplace) per ottenere la matrice dei pesi W per il Maximum Product Matching CSR* Matching::toMaximumProductMatrix(CSR *AH){ assert(AH->on_the_device); #if MAXIMUM_PRODUCT_MATRIX_OP == 0 stype n; n = AH->n; vector<vtype> *c = Vector::init<vtype>(n, true, true); vector<vtype> *alpha_candidate = Vector::init<vtype>(n, true, true); gridblock gb = gb1d(n, make_c_BLOCKSIZE, false); _make_c<<<gb.g, gb.b>>>(n, AH->val, AH->row, c->val, alpha_candidate->val); // find alpha in alpha_candidate vtype *alpha = find_Max_Min(alpha_candidate->val, n, 0); Vector::free<vtype>(alpha_candidate); gb = gb1d(n, make_w_BLOCKSIZE, false); _make_w<<<gb.g, gb.b>>>(n, AH->val, AH->col, AH->row, alpha, c->val); Vector::free<vtype>(c); 
CHECK_DEVICE( cudaFree(alpha) ); #else // do W_data[j]=log(fabs(B_data[j])/(0.999*min_B)); stype nnz = AH->nnz; // find the min value vtype *min = find_Max_Min(AH->val, nnz, 1); gridblock gb = gb1d(nnz, make_w_BLOCKSIZE, false); _make_w<<<gb.g, gb.b>>>(nnz, AH->val, min); CHECK_DEVICE( cudaFree(min) ); #endif return AH; } //#################################################################################### vector<itype>* Matching::suitor(CSR *A, vector<vtype> *w){ assert(A->on_the_device && w->on_the_device); #if MATCHING_MOD_TYPE == 1 int miniwarp_size = CSRm::choose_mini_warp_size(A); gridblock gb = gb1d(A->n, write_T_BLOCKSIZE, true, miniwarp_size); _write_T_warp<<<gb.g, gb.b>>>(A->n, miniwarp_size, A->val, A->col, A->row); #endif CSR *AH = Matching::makeAH(A, w); CSR *W = Matching::toMaximumProductMatrix(AH); vector<itype> *M = matchingAggregationContext::M_buffer; approx_match_gpu_suitor(W, M, matchingAggregationContext::ws_buffer, matchingAggregationContext::mutex_buffer); //std::cout << "CPU matching\n"; //M = approx_match_cpu_suitor<vtype>(W); #if MATCHING_MOD_TYPE == 0 M = unsymFix(M); #endif // W is an alias of AH CSRm::free(W); return M; }
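// find_Max_Min above follows CUB's two-pass convention: the first call with a
// NULL workspace only reports temp_storage_bytes, the second call runs the
// reduction. Minimal sketch for the Max case (illustrative names; error checks
// omitted):
#include <cuda_runtime.h>
#include <cub/cub.cuh>
static void deviceMax(const double* d_in, double* d_out, int n) {
    void*  d_temp = NULL;
    size_t temp_bytes = 0;
    cub::DeviceReduce::Max(d_temp, temp_bytes, d_in, d_out, n);  // size query only
    cudaMalloc(&d_temp, temp_bytes);
    cub::DeviceReduce::Max(d_temp, temp_bytes, d_in, d_out, n);  // actual reduction
    cudaFree(d_temp);
}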
dd2048940a8d3335713009d54531fd1498879411.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime_api.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "header_def.cuh" __global__ void MSV_TEST(int* seq, int total, unsigned int* offset, float* sc, int* L, int* L_6r, int* mat, int base, int bias, int tbm, int tec, float scale, int hmm_size, int rib) /* total is the total # of residues */ { /*--------------------------------------------------------*/ /* Shared memory */ /*--------------------------------------------------------*/ extern __shared__ unsigned char sm[]; unsigned int* cache = (unsigned int*)sm; unsigned char* MMX = (unsigned char*)&cache[rib]; unsigned char* MAT = (unsigned char*)&MMX[rib * (hmm_size + 1)]; unsigned char* RED = (unsigned char*)&MAT[hmm_size * PROTEIN_TYPE]; /*--------------------------------------------------------*/ /* registers */ /*--------------------------------------------------------*/ const int row = blockIdx.y * blockDim.y + threadIdx.y; /* individual index for each warp across whole grid */ int xE, xJ, xB; int sv; int mmx; int i, j, p; int res; int count = 0; /* used to times of cycle for each block */ int tjb; unsigned int off; int Len; /*--------------------------------------------------------*/ /* Global to Shared (ONLY FOR SHARED VERSION) */ /*--------------------------------------------------------*/ for(p = threadIdx.y * 32 + threadIdx.x; p < hmm_size * PROTEIN_TYPE; p += rib * 32) /* +=32 32threads */ { /* CAUTIONsince each block needs MAT[] separately */ MAT[p] = mat[p]; /* So we dont use 'row' to do this job */ } __syncthreads(); /*--------------------------------------------------------*/ /* OUTER LOOP BEGIN !!! */ /*--------------------------------------------------------*/ while(row + rib * gridDim.y * count < total) /* row + ROW_IN_BLOCK * gridDim.y * count rowseqindex */ { Len = L_6r[row + rib * gridDim.y * count]; /* for reuse, so cache into "Len" of register */ off = offset[row + rib * gridDim.y * count]; /* offset (beginning point) for each seq */ i = 0; /* must be refresh for each new seq come in */ mmx = 0; xJ = 0; tjb = -1.0f * roundf(scale * logf(3.0f / (float) (L[row + rib * gridDim.y * count] + 3))); /* EACH seq, we need recalculate tjbv */ tjb = (tjb > 255.) ? 255 : (unsigned char) tjb; xB = subm(base, tjb); for(p = threadIdx.x; p < hmm_size + 1; p += 32) { MMX[threadIdx.y * (hmm_size+1) + p] = 0; /* 0 is -INFINITY, So here is initial for new seq come in */ } /*--------------------------------------------------------*/ /* MIDDLE LOOP BEGIN !!! */ /*--------------------------------------------------------*/ while(i < Len) { cache[threadIdx.y] = seq[off + i]; /* GLOBAL ACCESS: for read a compressed cell in */ if( ((cache[threadIdx.y] >> 25) & 0x0000001F) == 31) break; /* Immediately check the end of seq */ #pragma unroll for(j = 0; j < 6 ; j++) /* REPEAT 6 TIMES! SINCE this is a compressed cell */ { xE = 0; xB = subm(xB, tbm); res = ( cache[threadIdx.y] >> 25 - 5 * j ) & 0x0000001F; if(res == 31){ break; } mmx = (int)MMX[threadIdx.y * (hmm_size+1) + threadIdx.x]; /* IMPORTANT: pull mmx back to the head of segemnt */ /*--------------------------------------------------------*/ /* INNER LOOP BEGIN !!! 
*/ /*--------------------------------------------------------*/ for(p = threadIdx.x; p < hmm_size; p += 32) /* bank-conflict free ADDRESSING */ { sv = max(mmx, xB); sv = addm(sv, bias); sv = subm(sv, MAT[res * hmm_size + p]); /* SHARED VERSION */ //sv = subm(sv, __ldg(&mat[res * HMM_SIZE + p])); /* READ-ONLY CACHE VERSION */ xE = max(xE, sv); if(p + 32 < hmm_size) mmx = (int)MMX[threadIdx.y * (hmm_size+1) + p + 32]; /* PROTECT FROM overflow of memory */ MMX[threadIdx.y * (hmm_size+1) + p + 1] = sv; } /* end inner alignment */ RED[threadIdx.y * 32 + threadIdx.x] = (unsigned char)xE; /* thread 0 - 15 for each row */ if(threadIdx.x < 16) { //problem under RELEASE running... RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 16]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 8]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 4]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 2]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 1]); } xE = (int)RED[threadIdx.y * 32]; /* Imrediately check high score sequence */ if( addm(xE, bias) == 255 ) { sc[row + rib * gridDim.y * count] = 999999.0f; break; /* break out MIDDLE LOOP, go next seq */ } /* get rest of parameters */ xE = subm(xE, tec); /* EC = EJ in MSV */ xJ = max(xJ, xE); /* xJ = max (xJ, xE - tEJ) */ xB = max(base, xJ); xB = subm(xB, tjb); /* xB = max (base, xJ) - tJB */ } i++; /* IMPORTANT: index++ for next MIDDLE CYCLE (next compressed cell of this seq) */ } /* end loop over sequence residues 1..L */ /* finally C->T, and add our missing precision on the NN,CC,JJ back */ if( abs(sc[row + rib * gridDim.y * count] - 999999.0f) < 1e-6 ) { /* do nothing */ } else { sc[row + rib * gridDim.y * count] = ((float) (xJ - tjb) - (float) base); sc[row + rib * gridDim.y * count] /= scale; sc[row + rib * gridDim.y * count] -= 3.0; /* that's ~ L \log \frac{L}{L+3}, for our NN,CC,JJ */ } count++; /* IMPORTANT: index++, for next OUTER CYCLE (index next new seq) */ } /* end loop over sequence database */ } /* -------------------------------------------------- * kernel warper * -------------------------------------------------- * * * hs: input variable -> HMM_SIZE * rib: input variable -> ROW_IN_BLOCK */ void MSV_warp(dim3 grid, dim3 block, int* SEQ, int total, unsigned int* OFFSET, float* SC, int* LEN, int* LEN_6R, int* MAT, HMMER_PROFILE* HMM, int device, int rib) { size_t shmem = rib * sizeof(unsigned int) + rib * (HMM->M + 1) * sizeof(unsigned char) + HMM->M * PROTEIN_TYPE * sizeof(unsigned char) + 32 * rib * sizeof(unsigned char); hipLaunchKernelGGL(( MSV_TEST), dim3(grid), dim3(block), shmem, 0, SEQ, total, OFFSET, SC, LEN, LEN_6R, MAT, HMM->base_b, HMM->bias_b, HMM->tbm_b, HMM->tec_b, HMM->scale_b, HMM->M, rib); //printf("GPU %d launched: %s\n", device, hipGetErrorString(hipGetLastError())); }
dd2048940a8d3335713009d54531fd1498879411.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_profiler_api.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "header_def.cuh" __global__ void MSV_TEST(int* seq, int total, unsigned int* offset, float* sc, int* L, int* L_6r, int* mat, int base, int bias, int tbm, int tec, float scale, int hmm_size, int rib) /* total is the total # of residues */ { /*--------------------------------------------------------*/ /* Shared memory */ /*--------------------------------------------------------*/ extern __shared__ unsigned char sm[]; unsigned int* cache = (unsigned int*)sm; unsigned char* MMX = (unsigned char*)&cache[rib]; unsigned char* MAT = (unsigned char*)&MMX[rib * (hmm_size + 1)]; unsigned char* RED = (unsigned char*)&MAT[hmm_size * PROTEIN_TYPE]; /*--------------------------------------------------------*/ /* registers */ /*--------------------------------------------------------*/ const int row = blockIdx.y * blockDim.y + threadIdx.y; /* individual index for each warp across whole grid */ int xE, xJ, xB; int sv; int mmx; int i, j, p; int res; int count = 0; /* used to times of cycle for each block */ int tjb; unsigned int off; int Len; /*--------------------------------------------------------*/ /* Global to Shared (ONLY FOR SHARED VERSION) */ /*--------------------------------------------------------*/ for(p = threadIdx.y * 32 + threadIdx.x; p < hmm_size * PROTEIN_TYPE; p += rib * 32) /* 这里 +=32 是因为一行有32个threads */ { /* CAUTION:since each block needs MAT[] separately */ MAT[p] = mat[p]; /* So we dont use 'row' to do this job */ } __syncthreads(); /*--------------------------------------------------------*/ /* OUTER LOOP BEGIN !!! */ /*--------------------------------------------------------*/ while(row + rib * gridDim.y * count < total) /* row + ROW_IN_BLOCK * gridDim.y * count 代表本row下一个目标seq的index */ { Len = L_6r[row + rib * gridDim.y * count]; /* for reuse, so cache into "Len" of register */ off = offset[row + rib * gridDim.y * count]; /* offset (beginning point) for each seq */ i = 0; /* must be refresh for each new seq come in */ mmx = 0; xJ = 0; tjb = -1.0f * roundf(scale * logf(3.0f / (float) (L[row + rib * gridDim.y * count] + 3))); /* EACH seq, we need recalculate tjbv */ tjb = (tjb > 255.) ? 255 : (unsigned char) tjb; xB = subm(base, tjb); for(p = threadIdx.x; p < hmm_size + 1; p += 32) { MMX[threadIdx.y * (hmm_size+1) + p] = 0; /* 0 is -INFINITY, So here is initial for new seq come in */ } /*--------------------------------------------------------*/ /* MIDDLE LOOP BEGIN !!! */ /*--------------------------------------------------------*/ while(i < Len) { cache[threadIdx.y] = seq[off + i]; /* GLOBAL ACCESS: for read a compressed cell in */ if( ((cache[threadIdx.y] >> 25) & 0x0000001F) == 31) break; /* Immediately check the end of seq */ #pragma unroll for(j = 0; j < 6 ; j++) /* REPEAT 6 TIMES! SINCE this is a compressed cell */ { xE = 0; xB = subm(xB, tbm); res = ( cache[threadIdx.y] >> 25 - 5 * j ) & 0x0000001F; if(res == 31){ break; } mmx = (int)MMX[threadIdx.y * (hmm_size+1) + threadIdx.x]; /* IMPORTANT: pull mmx back to the head of segemnt */ /*--------------------------------------------------------*/ /* INNER LOOP BEGIN !!! 
*/ /*--------------------------------------------------------*/ for(p = threadIdx.x; p < hmm_size; p += 32) /* bank-conflict free ADDRESSING */ { sv = max(mmx, xB); sv = addm(sv, bias); sv = subm(sv, MAT[res * hmm_size + p]); /* SHARED VERSION */ //sv = subm(sv, __ldg(&mat[res * HMM_SIZE + p])); /* READ-ONLY CACHE VERSION */ xE = max(xE, sv); if(p + 32 < hmm_size) mmx = (int)MMX[threadIdx.y * (hmm_size+1) + p + 32]; /* PROTECT FROM overflow of memory */ MMX[threadIdx.y * (hmm_size+1) + p + 1] = sv; } /* end inner alignment */ RED[threadIdx.y * 32 + threadIdx.x] = (unsigned char)xE; /* thread 0 - 15 for each row */ if(threadIdx.x < 16) { //problem under RELEASE running... RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 16]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 8]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 4]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 2]); RED[threadIdx.y * 32 + threadIdx.x] = max(RED[threadIdx.y * 32 + threadIdx.x], RED[threadIdx.y * 32 + threadIdx.x + 1]); } xE = (int)RED[threadIdx.y * 32]; /* Imrediately check high score sequence */ if( addm(xE, bias) == 255 ) { sc[row + rib * gridDim.y * count] = 999999.0f; break; /* break out MIDDLE LOOP, go next seq */ } /* get rest of parameters */ xE = subm(xE, tec); /* EC = EJ in MSV */ xJ = max(xJ, xE); /* xJ = max (xJ, xE - tEJ) */ xB = max(base, xJ); xB = subm(xB, tjb); /* xB = max (base, xJ) - tJB */ } i++; /* IMPORTANT: index++ for next MIDDLE CYCLE (next compressed cell of this seq) */ } /* end loop over sequence residues 1..L */ /* finally C->T, and add our missing precision on the NN,CC,JJ back */ if( abs(sc[row + rib * gridDim.y * count] - 999999.0f) < 1e-6 ) { /* do nothing */ } else { sc[row + rib * gridDim.y * count] = ((float) (xJ - tjb) - (float) base); sc[row + rib * gridDim.y * count] /= scale; sc[row + rib * gridDim.y * count] -= 3.0; /* that's ~ L \log \frac{L}{L+3}, for our NN,CC,JJ */ } count++; /* IMPORTANT: index++, for next OUTER CYCLE (index next new seq) */ } /* end loop over sequence database */ } /* -------------------------------------------------- * kernel warper * -------------------------------------------------- * * * hs: input variable -> HMM_SIZE * rib: input variable -> ROW_IN_BLOCK */ void MSV_warp(dim3 grid, dim3 block, int* SEQ, int total, unsigned int* OFFSET, float* SC, int* LEN, int* LEN_6R, int* MAT, HMMER_PROFILE* HMM, int device, int rib) { size_t shmem = rib * sizeof(unsigned int) + rib * (HMM->M + 1) * sizeof(unsigned char) + HMM->M * PROTEIN_TYPE * sizeof(unsigned char) + 32 * rib * sizeof(unsigned char); MSV_TEST<<<grid, block, shmem>>>(SEQ, total, OFFSET, SC, LEN, LEN_6R, MAT, HMM->base_b, HMM->bias_b, HMM->tbm_b, HMM->tec_b, HMM->scale_b, HMM->M, rib); //printf("GPU %d launched: %s\n", device, cudaGetErrorString(cudaGetLastError())); }
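// MSV_TEST splits one dynamic shared-memory allocation into several arrays by
// casting at byte offsets, and MSV_warp must pass the summed size as the third
// launch parameter. A stripped-down sketch of that pattern (field sizes here
// are illustrative, not taken from the kernel above):
__global__ void carveSharedExample(int rows, int cols) {
    extern __shared__ unsigned char sm[];
    unsigned int*  cache = (unsigned int*)sm;             // first: rows unsigned ints
    unsigned char* tile  = (unsigned char*)&cache[rows];  // then: rows*cols bytes
    // ... a real kernel would index cache[] and tile[] from here ...
}
// Launch with: carveSharedExample<<<grid, block, rows*sizeof(unsigned int) + rows*cols>>>(rows, cols);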
f9dc1349932332d36bab479650fc133855fbaf0d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < numEls; i += numThreads) {
        dest[i] = a[i] - b[i];
    }
}
f9dc1349932332d36bab479650fc133855fbaf0d.cu
#include "includes.h" __global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] - b[i]; } }
007d951e7767f09bce98c5f9b6626ea4fb19b1c7.hip
// !!! This is a file automatically generated by hipify!!! // Sample04_MNIST.cpp : // #include "stdafx.h" #include<crtdbg.h> #include<cuda.h> #include<cuda_runtime.h> #include<thrust/device_vector.h> #include<vector> #include<boost/filesystem/path.hpp> #include<boost/uuid/uuid_generators.hpp> #include"Library/Common/BatchDataNoListGenerator.h" #include"Library/DataFormat/Binary.h" #include"Library/NeuralNetwork/LayerDLLManager.h" #include"Library/NeuralNetwork/LayerDataManager.h" #include"Library/NeuralNetwork/NetworkParserXML.h" #include"Library/Layer/IOData/IODataLayer.h" #include"Layer/Connect/ILayerConnectData.h" #include"Layer/NeuralNetwork/INeuralNetwork.h" #include"Utility/NeuralNetworkLayer.h" #include"Utility/NeuralNetworkMaker.h" #include"Library/NeuralNetwork/Initializer.h" using namespace Gravisbell; #define USE_GPU 1 #define USE_HOST_MEMORY 1 #define USE_BATCHNORM 1 #define USE_DROPOUT 1 #define USE_BATCH_SIZE 4 #define MAX_EPOCH 20 /** @param o_ppDataLayerTeach @param o_ppDataLayerTest @param i_testRate %01 @param i_formatFilePath XML @param i_dataFilePath */ Gravisbell::ErrorCode LoadSampleData_image( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath); Gravisbell::ErrorCode LoadSampleData_label( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath); /** */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver01(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& inputDataStruct, const IODataStruct& outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver02(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver03(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver04(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver05(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver06(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver07(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver08(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, 
Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver09(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver10(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver11(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver12(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver13(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver14(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver15(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver16(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& inputDataStruct, const IODataStruct& outputDataStruct) { return CreateNeuralNetwork_ver16(layerDLLManager, layerDataManager, inputDataStruct, outputDataStruct); } /** */ Gravisbell::ErrorCode LearnWithCalculateSampleError( Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkLearn, Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkSample, Layer::IOData::IIODataLayer* pTeachInputLayer, Layer::IOData::IIODataLayer* pTeachTeachLayer, Layer::IOData::IIODataLayer* pSampleInputLayer, Layer::IOData::IIODataLayer* pSampleTeachLayer, const U32 BATCH_SIZE, const U32 LEARN_TIMES); int _tmain(int argc, _TCHAR* argv[]) { #ifdef _DEBUG ::_CrtSetDbgFlag(_CRTDBG_LEAK_CHECK_DF | _CRTDBG_ALLOC_MEM_DF); #endif boost::filesystem::path workDirPath = boost::filesystem::current_path(); //void* pValue = NULL; //hipMalloc(&pValue, 16); //hipFree(&pValue); // Layer::IOData::IIODataLayer* pDataLayerTeach_Input = NULL; Layer::IOData::IIODataLayer* pDataLayerTeach_Output = NULL; Layer::IOData::IIODataLayer* pDataLayerTest_Input = NULL; Layer::IOData::IIODataLayer* pDataLayerTest_Output = NULL; #ifndef _WIN64 
if(LoadSampleData_image(&pDataLayerTeach_Input, &pDataLayerTest_Input, 0.1f, L"../SampleData/MNIST/DataFormat_image.xml", L"../SampleData/MNIST/train-images.idx3-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #else if(LoadSampleData_image(&pDataLayerTeach_Input, &pDataLayerTest_Input, 0.1f, L"../../SampleData/MNIST/DataFormat_image.xml", L"../../SampleData/MNIST/train-images.idx3-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #endif { return -1; } #ifndef _WIN64 if(LoadSampleData_label(&pDataLayerTeach_Output, &pDataLayerTest_Output, 0.1f, L"../SampleData/MNIST/DataFormat_label.xml", L"../SampleData/MNIST/train-labels.idx1-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #else if(LoadSampleData_label(&pDataLayerTeach_Output, &pDataLayerTest_Output, 0.1f, L"../../SampleData/MNIST/DataFormat_label.xml", L"../../SampleData/MNIST/train-labels.idx1-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #endif { delete pDataLayerTeach_Input; delete pDataLayerTest_Input; return -1; } // DLL #if USE_GPU Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager = Gravisbell::Utility::NeuralNetworkLayer::CreateLayerDLLManagerGPU(L"./"); #else Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager = Gravisbell::Utility::NeuralNetworkLayer::CreateLayerDLLManagerCPU(L"./"); #endif if(pLayerDLLManager == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; return -1; } // Gravisbell::Layer::NeuralNetwork::ILayerDataManager* pLayerDataManager = Gravisbell::Layer::NeuralNetwork::CreateLayerDataManager(); if(pLayerDataManager == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDLLManager; return -1; } // //#ifdef _DEBUG Gravisbell::Layer::NeuralNetwork::GetInitializerManager().InitializeRandomParameter(0); //#endif // Gravisbell::Layer::Connect::ILayerConnectData* pNeuralNetworkData = CreateNeuralNetwork(*pLayerDLLManager, *pLayerDataManager, pDataLayerTeach_Input->GetInputDataStruct(), pDataLayerTeach_Output->GetDataStruct()); if(pNeuralNetworkData == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } // printf("\n"); Gravisbell::Utility::NeuralNetworkLayer::WriteNetworkToBinaryFile(*pNeuralNetworkData, L"../../LayerData/test.bin"); // { pLayerDataManager->EraseLayerByGUID(pNeuralNetworkData->GetGUID()); pNeuralNetworkData = NULL; Gravisbell::Layer::ILayerData* pTmpNeuralNetworkData = NULL; printf("\n"); Gravisbell::Utility::NeuralNetworkLayer::ReadNetworkFromBinaryFile(*pLayerDLLManager, &pTmpNeuralNetworkData, L"../../LayerData/test.bin"); // printf("2\n"); Gravisbell::Utility::NeuralNetworkLayer::WriteNetworkToBinaryFile(*pTmpNeuralNetworkData, L"../../LayerData/test2.bin"); printf("\n"); pNeuralNetworkData = dynamic_cast<Gravisbell::Layer::Connect::ILayerConnectData*>(pTmpNeuralNetworkData); } //// XML //Gravisbell::Layer::NeuralNetwork::Parser::SaveLayerToXML(*pNeuralNetworkData, L"../../LayerData/", L"test.xml"); //// //for(auto pLayerData : lppLayerData) // delete pLayerData; //lppLayerData.clear(); //pNeuralNetworkData = Gravisbell::Layer::NeuralNetwork::Parser::CreateLayerFromXML(*pLayerDLLManager, *pLayerDataManager, L"../../LayerData/", L"test.xml"); //// //Gravisbell::Utility::NeuralNetworkLayer::WriteNetworkToBinaryFile(*pNeuralNetworkData, 
"../../LayerData/test2.bin"); //// XML //Gravisbell::Layer::NeuralNetwork::Parser::SaveLayerToXML(*pNeuralNetworkData, L"../../LayerData/", L"test2.xml"); // Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkLearn = NULL; { #if USE_HOST_MEMORY Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #else Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer_device(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #endif pNeuralNetworkLearn = dynamic_cast<Layer::NeuralNetwork::INeuralNetwork*>(pLayer); if(pNeuralNetworkLearn == NULL) { if(pLayer) delete pLayer; } } if(pNeuralNetworkLearn == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } // Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkTest = NULL; { #if USE_HOST_MEMORY Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #else Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer_device(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #endif pNeuralNetworkTest = dynamic_cast<Layer::NeuralNetwork::INeuralNetwork*>(pLayer); if(pNeuralNetworkTest == NULL) { if(pLayer) delete pLayer; } } if(pNeuralNetworkTest == NULL) { delete pNeuralNetworkLearn; delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } // , { time_t startTime = time(NULL); // if(::LearnWithCalculateSampleError(pNeuralNetworkLearn, pNeuralNetworkTest, pDataLayerTeach_Input, pDataLayerTeach_Output, pDataLayerTest_Input, pDataLayerTest_Output, USE_BATCH_SIZE, MAX_EPOCH) != ErrorCode::ERROR_CODE_NONE) { delete pNeuralNetworkLearn; delete pNeuralNetworkTest; delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } time_t endTime = time(NULL); printf("(s) : %ld\n", (endTime - startTime)); } // delete pNeuralNetworkData; delete pNeuralNetworkLearn; delete pNeuralNetworkTest; delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; printf("Press any key to continue"); getc(stdin); return 0; } /** @param o_ppDataLayerTeach @param o_ppDataLayerTest @param i_testRate %01 @param i_formatFilePath XML @param i_dataFilePath */ Gravisbell::ErrorCode LoadSampleData_image( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath) { // Gravisbell::DataFormat::Binary::IDataFormat* pDataFormat = Gravisbell::DataFormat::Binary::CreateDataFormatFromXML(i_formatFilePath.c_str()); if(pDataFormat == NULL) return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; // std::vector<BYTE> lpBuf; { FILE* fp = _wfopen(i_dataFilePath.c_str(), L"rb"); if(fp == NULL) { delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; } fseek(fp, 0, SEEK_END); U32 fileSize = ftell(fp); lpBuf.resize(fileSize); fseek(fp, 0, SEEK_SET); fread(&lpBuf[0], 1, 
fileSize, fp); fclose(fp); } // U32 bufPos = 0; // bufPos = pDataFormat->LoadBinary(&lpBuf[0], (U32)lpBuf.size()); // Gravisbell::IODataStruct dataStruct(1, pDataFormat->GetVariableValue(L"columns"), pDataFormat->GetVariableValue(L"rows"), 1); #if USE_GPU #if USE_HOST_MEMORY *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); //*o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); //*o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); #endif #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #endif std::vector<F32> lpTmpBuf(dataStruct.GetDataCount()); // U32 dataCount = (U32)pDataFormat->GetVariableValue(L"images"); U32 teachDataCount = (U32)(dataCount*(1.0f - i_testRate)); for(U32 imageNum=0; imageNum<dataCount; imageNum++) { if(bufPos + dataStruct.GetDataCount() > lpBuf.size()) break; // U08 -> F32 for(U32 i=0; i<lpTmpBuf.size(); i++) { lpTmpBuf[i] = (F32)lpBuf[bufPos + i] / 0xFF; } if(imageNum < teachDataCount) (*o_ppDataLayerTeach)->AddData(&lpTmpBuf[0]); else (*o_ppDataLayerTest)->AddData(&lpTmpBuf[0]); bufPos += dataStruct.GetDataCount(); } // delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_NONE; } Gravisbell::ErrorCode LoadSampleData_label( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath) { // Gravisbell::DataFormat::Binary::IDataFormat* pDataFormat = Gravisbell::DataFormat::Binary::CreateDataFormatFromXML(i_formatFilePath.c_str()); if(pDataFormat == NULL) return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; // std::vector<BYTE> lpBuf; { FILE* fp = _wfopen(i_dataFilePath.c_str(), L"rb"); if(fp == NULL) { delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; } fseek(fp, 0, SEEK_END); U32 fileSize = ftell(fp); lpBuf.resize(fileSize); fseek(fp, 0, SEEK_SET); fread(&lpBuf[0], 1, fileSize, fp); fclose(fp); } // U32 bufPos = 0; // bufPos = pDataFormat->LoadBinary(&lpBuf[0], (U32)lpBuf.size()); // Gravisbell::IODataStruct dataStruct(10, 1, 1, 1); #if USE_GPU #if USE_HOST_MEMORY *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); // *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); // *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); #endif #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #endif std::vector<F32> lpTmpBuf(dataStruct.ch); // U32 dataCount = (U32)pDataFormat->GetVariableValue(L"images"); U32 teachDataCount = (U32)(dataCount*(1.0f - i_testRate)); for(U32 imageNum=0; imageNum<dataCount; imageNum++) { // U08 -> F32 
for(U32 i=0; i<lpTmpBuf.size(); i++) { if(i == lpBuf[bufPos]) lpTmpBuf[i] = 1.0f; else lpTmpBuf[i] = 0.0f; } if(imageNum < teachDataCount) (*o_ppDataLayerTeach)->AddData(&lpTmpBuf[0]); else (*o_ppDataLayerTest)->AddData(&lpTmpBuf[0]); bufPos += 1; } // delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver01(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { using namespace Gravisbell::Utility::NeuralNetworkLayer; Gravisbell::ErrorCode err; // Layer::Connect::ILayerConnectData* pNeuralNetwork = CreateNeuralNetwork(layerDLLManager, layerDataManager, 1); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateGaussianNoiseLayer(layerDLLManager, layerDataManager, 0.0f, 0.1f), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 1 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.2f), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif #if 1 // Single // 2 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.5f), false); if(err != 
ErrorCode::ERROR_CODE_NONE) return NULL; #endif #if 0 // Expand // 3 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, inputDataStruct, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif // 4 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, inputDataStruct, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif // 5 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, inputDataStruct, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif #endif // Expand #elif 0 // MergeInput // 1GUID Gravisbell::GUID lastLayerGUID_A = lastLayerGUID; Gravisbell::GUID lastLayerGUID_B = lastLayerGUID; // 2A { err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, 
CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_A, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_A, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // 2B { err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_B, &i_inputDataStruct, 1).ch, Vector3D<S32>(7,7,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(3,3,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_B, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // A,B err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateMergeInputLayer(layerDLLManager, layerDataManager), lastLayerGUID_A, lastLayerGUID_B); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #elif 1 // ResNet // Gravisbell::GUID lastLayerGUID_shortCut = lastLayerGUID; // 2 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; 
// err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateResidualLayer(layerDLLManager, layerDataManager), lastLayerGUID, lastLayerGUID_shortCut); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //// A,B //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateMergeInputLayer(layerDLLManager, inputDataStruct, inputDataStruct_shortCut), // lastLayerGUID, lastLayerGUID_shortCut); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateBatchNormalizationLayer(layerDLLManager, inputDataStruct)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateDropoutLayer(layerDLLManager, inputDataStruct, 0.5f)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #elif 0// UpSampling // 2 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateUpSamplingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), true)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateBatchNormalizationLayer(layerDLLManager, inputDataStruct)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateDropoutLayer(layerDLLManager, inputDataStruct, 0.5f)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #else #endif // 3 #if 1 // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateFullyConnectLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).GetDataCount(), i_outputDataStruct.GetDataCount()), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"softmax_ALL_crossEntropy"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #elif 0 // GlobalAveragePooling // (2ch) err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, inputDataStruct, Vector3D<S32>(5,5,1), outputDataStruct.GetDataCount(), Vector3D<S32>(1,1,1), 
Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // Pooling err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateGlobalAveragePoolingLayer(layerDLLManager, layerDataManager, inputDataStruct)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, inputDataStruct, L"softmax_ALL_crossEntropy")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #else #endif // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"SGD"); pNeuralNetwork->SetOptimizerHyperParameter(L"LearnCoeff", 0.005f); // pNeuralNetwork->ChangeOptimizer(L"Adam"); return pNeuralNetwork; } /** */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver02(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { using namespace Gravisbell::Utility::NeuralNetworkLayer; Gravisbell::ErrorCode err; // Layer::Connect::ILayerConnectData* pNeuralNetwork = CreateNeuralNetwork(layerDLLManager, layerDataManager, 1); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // 1 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationAllLayer(layerDLLManager, layerDataManager), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 2 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // Gravisbell::GUID lastLayerGUID_chA = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chB = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chC = lastLayerGUID; // A { // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chA, CreateChooseChannelLayer(layerDLLManager, layerDataManager, 0, 4), false ); if(err != 
ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chA, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_chA, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chA, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // B { // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chB, CreateChooseChannelLayer(layerDLLManager, layerDataManager, 4, 4), false ); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chB, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_chB, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chB, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // C { // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chC, CreateChooseChannelLayer(layerDLLManager, layerDataManager, 8, 8), false ); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chC, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_chC, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chC, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateMergeAverageLayer(layerDLLManager, layerDataManager, LayerMergeType::LYAERMERGETYPE_MIN), false, lastLayerGUID_chA, lastLayerGUID_chB, lastLayerGUID_chC); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 4 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateFullyConnectLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).GetDataCount(), i_outputDataStruct.GetDataCount()), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"softmax_ALL_crossEntropy"), false); if(err != ErrorCode::ERROR_CODE_NONE) return 
NULL; // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // // pNeuralNetwork->ChangeOptimizer(L"SGD"); // pNeuralNetwork->SetOptimizerHyperParameter(L"LearnCoeff", 0.005f); pNeuralNetwork->ChangeOptimizer(L"Adam"); return pNeuralNetwork; } /** */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver03(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // 1 lastLayerGUID = pNetworkMaker->AddConvolutionLayer(lastLayerGUID, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); // lastLayerGUID = pNetworkMaker->AddNormalizationScaleLayer(lastLayerGUID); // 2 lastLayerGUID = pNetworkMaker->AddConvolutionLayer(lastLayerGUID, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); // Gravisbell::GUID lastLayerGUID_chA = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chB = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chC = lastLayerGUID; // A { // lastLayerGUID_chA = pNetworkMaker->AddChooseChannelLayer(lastLayerGUID_chA, 0, 4); lastLayerGUID_chA = pNetworkMaker->AddConvolutionLayer(lastLayerGUID_chA, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID_chA = pNetworkMaker->AddActivationLayer(lastLayerGUID_chA, L"ReLU"); } // B { // lastLayerGUID_chB = pNetworkMaker->AddChooseChannelLayer(lastLayerGUID_chB, 4, 4); lastLayerGUID_chB = pNetworkMaker->AddConvolutionLayer(lastLayerGUID_chB, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID_chB = pNetworkMaker->AddActivationLayer(lastLayerGUID_chB, L"ReLU"); } // C { // lastLayerGUID_chC = pNetworkMaker->AddChooseChannelLayer(lastLayerGUID_chC, 8, 8); lastLayerGUID_chC = pNetworkMaker->AddConvolutionLayer(lastLayerGUID_chC, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID_chC = pNetworkMaker->AddActivationLayer(lastLayerGUID_chC, L"ReLU"); } // lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LayerMergeType::LYAERMERGETYPE_MIN, lastLayerGUID_chA, lastLayerGUID_chB, lastLayerGUID_chC); // 4 lastLayerGUID = pNetworkMaker->AddConvolutionLayer(lastLayerGUID, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), 
Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); // lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"Default", L"glorot_normal"); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // // pNeuralNetwork->ChangeOptimizer(L"SGD"); // pNeuralNetwork->SetOptimizerHyperParameter(L"LearnCoeff", 0.005f); pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } /** */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver04(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // lastLayerGUID = pNetworkMaker->AddReshapeMirrorXLayer(lastLayerGUID); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CBAD(lastLayerGUID, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver05(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = 
pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 1, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddReshapeLayer(lastLayerGUID, IODataStruct(14, 56, 1, 1)); lastLayerGUID = pNetworkMaker->AddNormalizationScaleLayer(lastLayerGUID); lastLayerGUID = pNetworkMaker->AddReshapeSquareZeroSideLeftTopLayer(lastLayerGUID, 10, 6); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver06(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 1024, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 512, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 256, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 128, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { 
layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver07(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddChooseBoxLayer(lastLayerGUID, Vector3D<S32>(4,4,0), Vector3D<S32>(20,20,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 256, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 128, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver08(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1), Vector3D<S32>(0,0,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver09(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, 
Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddValue2SignalArrayLayer(lastLayerGUID, 0.0f, 1.0, 8); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,1), Vector3D<S32>(0,0,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,1), Vector3D<S32>(0,0,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver10(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); Gravisbell::GUID bypassLayer = lastLayerGUID; { lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); } lastLayerGUID = pNetworkMaker->AddMergeAddLayer(Utility::NeuralNetworkLayer::LayerMergeType::LYAERMERGETYPE_LAYER0, sqrtf(0.5f), lastLayerGUID, bypassLayer); bypassLayer = lastLayerGUID; { lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); } lastLayerGUID = pNetworkMaker->AddMergeAddLayer(Utility::NeuralNetworkLayer::LayerMergeType::LYAERMERGETYPE_LAYER0, sqrtf(0.5f), lastLayerGUID, bypassLayer); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), 
L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver11(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddChooseBoxLayer(lastLayerGUID, Vector3D<S32>(4,4,0), Vector3D<S32>(20,20,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 256, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 128, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver12(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), 
pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver13(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); // lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); // lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != 
i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver14(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { enum class NORMALIZATION_TYPE { NONE, BATCH, EXP }; NORMALIZATION_TYPE normalizationType = NORMALIZATION_TYPE::EXP; // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return 
pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver15(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { enum class NORMALIZATION_TYPE { NONE, BATCH, EXP }; NORMALIZATION_TYPE normalizationType = NORMALIZATION_TYPE::BATCH; // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, 1024); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, 512); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, 256); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver16(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // if(pNeuralNetwork) { // Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddLimitBackPropagationBoxLayer(lastLayerGUID, Vector3D<S32>(1,1,0), Vector3D<S32>(12,12,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = 
pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } /** */ Gravisbell::ErrorCode LearnWithCalculateSampleError( Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkLearn, Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkSample, Layer::IOData::IIODataLayer* pTeachInputLayer, Layer::IOData::IIODataLayer* pTeachOutputLayer, Layer::IOData::IIODataLayer* pSampleInputLayer, Layer::IOData::IIODataLayer* pSampleOutputLayer, const U32 BATCH_SIZE, const U32 LEARN_TIMES) { Gravisbell::ErrorCode err; // pNeuralNetworkLearn->SetRuntimeParameter(L"UseDropOut", true); pNeuralNetworkSample->SetRuntimeParameter(L"UseDropOut", false); pNeuralNetworkLearn->SetRuntimeParameter(L"GaussianNoise_Bias", 0.0f); pNeuralNetworkLearn->SetRuntimeParameter(L"GaussianNoise_Power", 0.0f); pNeuralNetworkSample->SetRuntimeParameter(L"GaussianNoise_Bias", 0.0f); pNeuralNetworkSample->SetRuntimeParameter(L"GaussianNoise_Power", 0.0f); // err = pNeuralNetworkLearn->PreProcessLearn(BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pTeachInputLayer->PreProcessLearn(BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pTeachOutputLayer->PreProcessLearn(BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pNeuralNetworkSample->PreProcessCalculate(1); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pSampleInputLayer->PreProcessCalculate(1); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pSampleOutputLayer->PreProcessCalculate(1); if(err != ErrorCode::ERROR_CODE_NONE) return err; // No Gravisbell::Common::IBatchDataNoListGenerator* pBatchDataNoListGenerator = Gravisbell::Common::CreateBatchDataNoListGenerator(); err = pBatchDataNoListGenerator->PreProcess(pTeachInputLayer->GetDataCount(), BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) { delete pBatchDataNoListGenerator; return err; } std::vector<F32> lpDInputBuffer(pNeuralNetworkLearn->GetInputBufferCount(0) * BATCH_SIZE); std::vector<F32> lpOutputBuffer(pTeachOutputLayer->GetBufferCount() * BATCH_SIZE); std::vector<F32> lpTeachBuffer(pTeachOutputLayer->GetBufferCount() * BATCH_SIZE); // LSUV ( LAYER-SEQUENTIAL UNIT-VARIANCE INITIALIZATION ) { pNeuralNetworkLearn->SetRuntimeParameter(L"UpdateWeigthWithOutputVariance", true); pTeachInputLayer->PreProcessLoop(); pNeuralNetworkLearn->PreProcessLoop(); pTeachInputLayer->SetBatchDataNoList(pBatchDataNoListGenerator->GetBatchDataNoListByNum(0)); CONST_BATCH_BUFFER_POINTER lpInputBuffer[] = {pTeachInputLayer->GetOutputBuffer()}; pNeuralNetworkLearn->Calculate(lpInputBuffer); pNeuralNetworkLearn->SetRuntimeParameter(L"UpdateWeigthWithOutputVariance", false); } // for(U32 learnTime=0; learnTime<LEARN_TIMES; learnTime++) { // printf("%5d ", learnTime); printf("%5d,", learnTime); U32 correctCount_learn = 0; // U32 correctCount_sample = 0; // // { // // 
pBatchDataNoListGenerator->PreProcessLearnLoop(); pTeachInputLayer->PreProcessLoop(); pTeachOutputLayer->PreProcessLoop(); pNeuralNetworkLearn->PreProcessLoop(); // // for(U32 batchNum=0; batchNum<pBatchDataNoListGenerator->GetBatchDataNoListCount(); batchNum++) { #if USE_GPU if(batchNum%10 == 0) #endif { printf(" L=%5.1f%%", (F32)batchNum * 100 / pBatchDataNoListGenerator->GetBatchDataNoListCount()); printf("\b\b\b\b\b\b\b\b\b"); } // pTeachInputLayer->SetBatchDataNoList(pBatchDataNoListGenerator->GetBatchDataNoListByNum(batchNum)); pTeachOutputLayer->SetBatchDataNoList(pBatchDataNoListGenerator->GetBatchDataNoListByNum(batchNum)); // CONST_BATCH_BUFFER_POINTER lpInputBuffer[] = {pTeachInputLayer->GetOutputBuffer()}; pNeuralNetworkLearn->Calculate(lpInputBuffer); // // pTeachOutputLayer->CalculateLearnError(pNeuralNetworkLearn->GetOutputBuffer()); // BATCH_BUFFER_POINTER lppDInputBuffer[] = {&lpDInputBuffer[0]}; // pNeuralNetworkLearn->Training(lppDInputBuffer, pTeachOutputLayer->GetDInputBuffer()); pNeuralNetworkLearn->Training(NULL, pTeachOutputLayer->GetDInputBuffer()); // pTeachOutputLayer->GetOutputBuffer(&lpTeachBuffer[0]); pNeuralNetworkLearn->GetOutputBuffer(&lpOutputBuffer[0]); for(U32 batchDataNum=0; batchDataNum<pTeachOutputLayer->GetBatchSize(); batchDataNum++) { // U32 correctNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pTeachOutputLayer->GetBufferCount(); i++) { U32 bufferPos = batchDataNum * pTeachOutputLayer->GetBufferCount() + i; if(lpTeachBuffer[bufferPos] > curValue) { correctNo = i; curValue = lpTeachBuffer[bufferPos]; } } } // U32 outputNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pTeachOutputLayer->GetBufferCount(); i++) { U32 bufferPos = batchDataNum * pTeachOutputLayer->GetBufferCount() + i; if(lpOutputBuffer[bufferPos] > curValue) { outputNo = i; curValue = lpOutputBuffer[bufferPos]; } } } if(correctNo == outputNo) { correctCount_learn++; } } } } // { // pSampleInputLayer->PreProcessLoop(); pSampleOutputLayer->PreProcessLoop(); pNeuralNetworkSample->PreProcessLoop(); // for(U32 dataNum=0; dataNum<pSampleInputLayer->GetDataCount(); dataNum++) { #if USE_GPU if(dataNum%10 == 0) #endif { printf(" T=%5.1f%%", (F32)dataNum * 100 / pSampleInputLayer->GetDataCount()); printf("\b\b\b\b\b\b\b\b\b"); } // pSampleInputLayer->SetBatchDataNoList(&dataNum); pSampleOutputLayer->SetBatchDataNoList(&dataNum); // CONST_BATCH_BUFFER_POINTER lpInputBuffer[] = {pSampleInputLayer->GetOutputBuffer()}; pNeuralNetworkSample->Calculate(lpInputBuffer); // pSampleOutputLayer->CalculateLearnError(pNeuralNetworkSample->GetOutputBuffer()); // pSampleOutputLayer->GetOutputBuffer(&lpTeachBuffer[0]); pNeuralNetworkSample->GetOutputBuffer(&lpOutputBuffer[0]); { U32 correctNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pSampleOutputLayer->GetBufferCount(); i++) { if(lpTeachBuffer[i] > curValue) { correctNo = i; curValue = lpTeachBuffer[i]; } } } // U32 outputNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pSampleOutputLayer->GetBufferCount(); i++) { if(lpOutputBuffer[i] > curValue) { outputNo = i; curValue = lpOutputBuffer[i]; } } } if(correctNo == outputNo) { correctCount_sample++; } } } } // { F32 errorMax, errorAve, errorAve2, errorCrossEntoropy; pTeachOutputLayer->GetCalculateErrorValue(errorMax, errorAve, errorAve2, errorCrossEntoropy); // printf("max=%.3f, ave=%.3f, ave2=%.3f, entropy=%.3f", errorMax, errorAve, errorAve2, errorCrossEntoropy); printf("%.3f,%.3f,%.3f,%.3f,", errorMax, errorAve2, errorCrossEntoropy, (F32)correctCount_learn / (pBatchDataNoListGenerator->GetBatchDataNoListCount() * 
BATCH_SIZE)); } // printf(" : "); { F32 errorMax, errorAve, errorAve2, errorCrossEntoropy; pSampleOutputLayer->GetCalculateErrorValue(errorMax, errorAve, errorAve2, errorCrossEntoropy); // printf("max=%.3f, ave=%.3f, ave2=%.3f, entropy=%.3f", errorMax, errorAve, errorAve2, errorCrossEntoropy); printf("%.3f,%.3f,%.3f,%.3f", errorMax, errorAve2, errorCrossEntoropy, (F32)correctCount_sample / pSampleInputLayer->GetDataCount()); } printf("\n"); } // delete pBatchDataNoListGenerator; return ErrorCode::ERROR_CODE_NONE; }
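// ---------------------------------------------------------------------------
// Illustrative sketch: the training and test loops above both compute top-1
// accuracy by taking the arg-max of the teacher buffer and of the network
// output buffer and counting a hit when the two indices match. A minimal,
// self-contained helper expressing that same pattern (assuming plain F32 host
// buffers as returned by GetOutputBuffer; the helper name and the "offset"
// variable below are hypothetical, not part of the library API) could look
// like this:
//
//   static U32 ArgMaxIndex(const F32* pBuffer, U32 bufferCount)
//   {
//       U32 maxNo    = 0;
//       F32 maxValue = 0.0f;   // outputs are softmax values in [0,1], so 0 is a safe floor
//       for(U32 i = 0; i < bufferCount; i++)
//       {
//           if(pBuffer[i] > maxValue)
//           {
//               maxNo    = i;
//               maxValue = pBuffer[i];
//           }
//       }
//       return maxNo;
//   }
//
//   // usage inside the batch loop (hypothetical offset = batchDataNum * bufferCount):
//   //   if(ArgMaxIndex(&lpTeachBuffer[offset],  bufferCount) ==
//   //      ArgMaxIndex(&lpOutputBuffer[offset], bufferCount))
//   //       correctCount_learn++;
// ---------------------------------------------------------------------------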
007d951e7767f09bce98c5f9b6626ea4fb19b1c7.cu
// Sample04_MNIST.cpp : コンソール アプリケーションのエントリ ポイントを定義します。 // #include "stdafx.h" #include<crtdbg.h> #include<cuda.h> #include<cuda_runtime.h> #include<thrust/device_vector.h> #include<vector> #include<boost/filesystem/path.hpp> #include<boost/uuid/uuid_generators.hpp> #include"Library/Common/BatchDataNoListGenerator.h" #include"Library/DataFormat/Binary.h" #include"Library/NeuralNetwork/LayerDLLManager.h" #include"Library/NeuralNetwork/LayerDataManager.h" #include"Library/NeuralNetwork/NetworkParserXML.h" #include"Library/Layer/IOData/IODataLayer.h" #include"Layer/Connect/ILayerConnectData.h" #include"Layer/NeuralNetwork/INeuralNetwork.h" #include"Utility/NeuralNetworkLayer.h" #include"Utility/NeuralNetworkMaker.h" #include"Library/NeuralNetwork/Initializer.h" using namespace Gravisbell; #define USE_GPU 1 #define USE_HOST_MEMORY 1 #define USE_BATCHNORM 1 #define USE_DROPOUT 1 #define USE_BATCH_SIZE 4 #define MAX_EPOCH 20 /** データファイルをを読み込む @param o_ppDataLayerTeach 教師データを格納したデータクラスの格納先ポインタアドレス @param o_ppDataLayerTest テストデータを格納したデータクラスの格納先ポインタアドレス @param i_testRate テストデータを全体の何%にするか0〜1の間で設定 @param i_formatFilePath フォーマット設定の入ったXMLファイルパス @param i_dataFilePath データの入ったバイナリファイルパス */ Gravisbell::ErrorCode LoadSampleData_image( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath); Gravisbell::ErrorCode LoadSampleData_label( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath); /** ニューラルネットワーククラスを作成する */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver01(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& inputDataStruct, const IODataStruct& outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver02(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver03(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver04(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver05(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver06(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver07(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); 
Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver08(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver09(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver10(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver11(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver12(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver13(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver14(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver15(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver16(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct); Layer::Connect::ILayerConnectData* CreateNeuralNetwork(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& inputDataStruct, const IODataStruct& outputDataStruct) { return CreateNeuralNetwork_ver16(layerDLLManager, layerDataManager, inputDataStruct, outputDataStruct); } /** ニューラルネットワークの学習とサンプル実行を同時実行 */ Gravisbell::ErrorCode LearnWithCalculateSampleError( Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkLearn, Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkSample, Layer::IOData::IIODataLayer* pTeachInputLayer, Layer::IOData::IIODataLayer* pTeachTeachLayer, Layer::IOData::IIODataLayer* pSampleInputLayer, Layer::IOData::IIODataLayer* pSampleTeachLayer, const U32 BATCH_SIZE, const U32 LEARN_TIMES); int _tmain(int argc, _TCHAR* argv[]) { #ifdef _DEBUG ::_CrtSetDbgFlag(_CRTDBG_LEAK_CHECK_DF | _CRTDBG_ALLOC_MEM_DF); #endif boost::filesystem::path workDirPath = boost::filesystem::current_path(); //void* pValue = NULL; //cudaMalloc(&pValue, 16); //cudaFree(&pValue); // 画像を読み込み Layer::IOData::IIODataLayer* pDataLayerTeach_Input = NULL; Layer::IOData::IIODataLayer* 
pDataLayerTeach_Output = NULL; Layer::IOData::IIODataLayer* pDataLayerTest_Input = NULL; Layer::IOData::IIODataLayer* pDataLayerTest_Output = NULL; #ifndef _WIN64 if(LoadSampleData_image(&pDataLayerTeach_Input, &pDataLayerTest_Input, 0.1f, L"../SampleData/MNIST/DataFormat_image.xml", L"../SampleData/MNIST/train-images.idx3-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #else if(LoadSampleData_image(&pDataLayerTeach_Input, &pDataLayerTest_Input, 0.1f, L"../../SampleData/MNIST/DataFormat_image.xml", L"../../SampleData/MNIST/train-images.idx3-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #endif { return -1; } #ifndef _WIN64 if(LoadSampleData_label(&pDataLayerTeach_Output, &pDataLayerTest_Output, 0.1f, L"../SampleData/MNIST/DataFormat_label.xml", L"../SampleData/MNIST/train-labels.idx1-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #else if(LoadSampleData_label(&pDataLayerTeach_Output, &pDataLayerTest_Output, 0.1f, L"../../SampleData/MNIST/DataFormat_label.xml", L"../../SampleData/MNIST/train-labels.idx1-ubyte") != Gravisbell::ErrorCode::ERROR_CODE_NONE) #endif { delete pDataLayerTeach_Input; delete pDataLayerTest_Input; return -1; } // レイヤーDLL管理クラスを作成 #if USE_GPU Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager = Gravisbell::Utility::NeuralNetworkLayer::CreateLayerDLLManagerGPU(L"./"); #else Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager = Gravisbell::Utility::NeuralNetworkLayer::CreateLayerDLLManagerCPU(L"./"); #endif if(pLayerDLLManager == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; return -1; } // レイヤーデータ管理クラスを作成 Gravisbell::Layer::NeuralNetwork::ILayerDataManager* pLayerDataManager = Gravisbell::Layer::NeuralNetwork::CreateLayerDataManager(); if(pLayerDataManager == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDLLManager; return -1; } // 乱数を固定 //#ifdef _DEBUG Gravisbell::Layer::NeuralNetwork::GetInitializerManager().InitializeRandomParameter(0); //#endif // ニューラルネットワーク作成 Gravisbell::Layer::Connect::ILayerConnectData* pNeuralNetworkData = CreateNeuralNetwork(*pLayerDLLManager, *pLayerDataManager, pDataLayerTeach_Input->GetInputDataStruct(), pDataLayerTeach_Output->GetDataStruct()); if(pNeuralNetworkData == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } // ファイルに保存する printf("バイナリファイル保存\n"); Gravisbell::Utility::NeuralNetworkLayer::WriteNetworkToBinaryFile(*pNeuralNetworkData, L"../../LayerData/test.bin"); // ファイルから読み込む { pLayerDataManager->EraseLayerByGUID(pNeuralNetworkData->GetGUID()); pNeuralNetworkData = NULL; Gravisbell::Layer::ILayerData* pTmpNeuralNetworkData = NULL; printf("バイナリファイル読み込み\n"); Gravisbell::Utility::NeuralNetworkLayer::ReadNetworkFromBinaryFile(*pLayerDLLManager, &pTmpNeuralNetworkData, L"../../LayerData/test.bin"); // 別ファイルに保存する printf("バイナリファイル保存2\n"); Gravisbell::Utility::NeuralNetworkLayer::WriteNetworkToBinaryFile(*pTmpNeuralNetworkData, L"../../LayerData/test2.bin"); printf("終了\n"); pNeuralNetworkData = dynamic_cast<Gravisbell::Layer::Connect::ILayerConnectData*>(pTmpNeuralNetworkData); } //// XMLファイルに保存する //Gravisbell::Layer::NeuralNetwork::Parser::SaveLayerToXML(*pNeuralNetworkData, L"../../LayerData/", L"test.xml"); //// ファイルから読み込む //for(auto pLayerData : lppLayerData) // delete 
pLayerData; //lppLayerData.clear(); //pNeuralNetworkData = Gravisbell::Layer::NeuralNetwork::Parser::CreateLayerFromXML(*pLayerDLLManager, *pLayerDataManager, L"../../LayerData/", L"test.xml"); //// バイナリファイルに保存する //Gravisbell::Utility::NeuralNetworkLayer::WriteNetworkToBinaryFile(*pNeuralNetworkData, "../../LayerData/test2.bin"); //// 別のXMLファイルに保存する //Gravisbell::Layer::NeuralNetwork::Parser::SaveLayerToXML(*pNeuralNetworkData, L"../../LayerData/", L"test2.xml"); // 学習用ニューラルネットワーク作成 Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkLearn = NULL; { #if USE_HOST_MEMORY Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #else Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer_device(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #endif pNeuralNetworkLearn = dynamic_cast<Layer::NeuralNetwork::INeuralNetwork*>(pLayer); if(pNeuralNetworkLearn == NULL) { if(pLayer) delete pLayer; } } if(pNeuralNetworkLearn == NULL) { delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } // テスト用ニューラルネットワーク作成 Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkTest = NULL; { #if USE_HOST_MEMORY Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #else Layer::ILayerBase* pLayer = pNeuralNetworkData->CreateLayer_device(boost::uuids::random_generator()().data, &pDataLayerTeach_Input->GetOutputDataStruct(), 1); #endif pNeuralNetworkTest = dynamic_cast<Layer::NeuralNetwork::INeuralNetwork*>(pLayer); if(pNeuralNetworkTest == NULL) { if(pLayer) delete pLayer; } } if(pNeuralNetworkTest == NULL) { delete pNeuralNetworkLearn; delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } // 学習, テスト実行 { time_t startTime = time(NULL); // 学習 if(::LearnWithCalculateSampleError(pNeuralNetworkLearn, pNeuralNetworkTest, pDataLayerTeach_Input, pDataLayerTeach_Output, pDataLayerTest_Input, pDataLayerTest_Output, USE_BATCH_SIZE, MAX_EPOCH) != ErrorCode::ERROR_CODE_NONE) { delete pNeuralNetworkLearn; delete pNeuralNetworkTest; delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; return -1; } time_t endTime = time(NULL); printf("経過時間(s) : %ld\n", (endTime - startTime)); } // バッファ開放 delete pNeuralNetworkData; delete pNeuralNetworkLearn; delete pNeuralNetworkTest; delete pDataLayerTeach_Input; delete pDataLayerTeach_Output; delete pDataLayerTest_Input; delete pDataLayerTest_Output; delete pLayerDataManager; delete pLayerDLLManager; printf("Press any key to continue"); getc(stdin); return 0; } /** データファイルをを読み込む @param o_ppDataLayerTeach 教師データを格納したデータクラスの格納先ポインタアドレス @param o_ppDataLayerTest テストデータを格納したデータクラスの格納先ポインタアドレス @param i_testRate テストデータを全体の何%にするか0〜1の間で設定 @param i_formatFilePath フォーマット設定の入ったXMLファイルパス @param i_dataFilePath データの入ったバイナリファイルパス */ Gravisbell::ErrorCode LoadSampleData_image( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath) { // フォーマットを読み込む 
Gravisbell::DataFormat::Binary::IDataFormat* pDataFormat = Gravisbell::DataFormat::Binary::CreateDataFormatFromXML(i_formatFilePath.c_str()); if(pDataFormat == NULL) return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; // バッファを読み込む std::vector<BYTE> lpBuf; { FILE* fp = _wfopen(i_dataFilePath.c_str(), L"rb"); if(fp == NULL) { delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; } fseek(fp, 0, SEEK_END); U32 fileSize = ftell(fp); lpBuf.resize(fileSize); fseek(fp, 0, SEEK_SET); fread(&lpBuf[0], 1, fileSize, fp); fclose(fp); } // フォーマットを使ってヘッダを読み込む U32 bufPos = 0; // ヘッダを読み込む bufPos = pDataFormat->LoadBinary(&lpBuf[0], (U32)lpBuf.size()); // データ構造を作成する Gravisbell::IODataStruct dataStruct(1, pDataFormat->GetVariableValue(L"columns"), pDataFormat->GetVariableValue(L"rows"), 1); #if USE_GPU #if USE_HOST_MEMORY *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); //*o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); //*o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); #endif #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #endif std::vector<F32> lpTmpBuf(dataStruct.GetDataCount()); // データの見込み U32 dataCount = (U32)pDataFormat->GetVariableValue(L"images"); U32 teachDataCount = (U32)(dataCount*(1.0f - i_testRate)); for(U32 imageNum=0; imageNum<dataCount; imageNum++) { if(bufPos + dataStruct.GetDataCount() > lpBuf.size()) break; // U08 -> F32 変換 for(U32 i=0; i<lpTmpBuf.size(); i++) { lpTmpBuf[i] = (F32)lpBuf[bufPos + i] / 0xFF; } if(imageNum < teachDataCount) (*o_ppDataLayerTeach)->AddData(&lpTmpBuf[0]); else (*o_ppDataLayerTest)->AddData(&lpTmpBuf[0]); bufPos += dataStruct.GetDataCount(); } // データフォーマット削除 delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_NONE; } Gravisbell::ErrorCode LoadSampleData_label( Layer::IOData::IIODataLayer** o_ppDataLayerTeach, Layer::IOData::IIODataLayer** o_ppDataLayerTest, F32 i_testRate, boost::filesystem::wpath i_formatFilePath, boost::filesystem::wpath i_dataFilePath) { // フォーマットを読み込む Gravisbell::DataFormat::Binary::IDataFormat* pDataFormat = Gravisbell::DataFormat::Binary::CreateDataFormatFromXML(i_formatFilePath.c_str()); if(pDataFormat == NULL) return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; // バッファを読み込む std::vector<BYTE> lpBuf; { FILE* fp = _wfopen(i_dataFilePath.c_str(), L"rb"); if(fp == NULL) { delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_COMMON_FILE_NOT_FOUND; } fseek(fp, 0, SEEK_END); U32 fileSize = ftell(fp); lpBuf.resize(fileSize); fseek(fp, 0, SEEK_SET); fread(&lpBuf[0], 1, fileSize, fp); fclose(fp); } // フォーマットを使ってヘッダを読み込む U32 bufPos = 0; // ヘッダを読み込む bufPos = pDataFormat->LoadBinary(&lpBuf[0], (U32)lpBuf.size()); // データ構造を作成する Gravisbell::IODataStruct dataStruct(10, 1, 1, 1); #if USE_GPU #if USE_HOST_MEMORY *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); *o_ppDataLayerTest = 
Gravisbell::Layer::IOData::CreateIODataLayerGPU_host(dataStruct); // *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); // *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerGPU_device(dataStruct); #endif #else *o_ppDataLayerTeach = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); *o_ppDataLayerTest = Gravisbell::Layer::IOData::CreateIODataLayerCPU(dataStruct); #endif std::vector<F32> lpTmpBuf(dataStruct.ch); // データの見込み U32 dataCount = (U32)pDataFormat->GetVariableValue(L"images"); U32 teachDataCount = (U32)(dataCount*(1.0f - i_testRate)); for(U32 imageNum=0; imageNum<dataCount; imageNum++) { // U08 -> F32 変換 for(U32 i=0; i<lpTmpBuf.size(); i++) { if(i == lpBuf[bufPos]) lpTmpBuf[i] = 1.0f; else lpTmpBuf[i] = 0.0f; } if(imageNum < teachDataCount) (*o_ppDataLayerTeach)->AddData(&lpTmpBuf[0]); else (*o_ppDataLayerTest)->AddData(&lpTmpBuf[0]); bufPos += 1; } // データフォーマット削除 delete pDataFormat; return Gravisbell::ErrorCode::ERROR_CODE_NONE; } /** ニューラルネットワーククラスを作成する */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver01(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { using namespace Gravisbell::Utility::NeuralNetworkLayer; Gravisbell::ErrorCode err; // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = CreateNeuralNetwork(layerDLLManager, layerDataManager, 1); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // ノイズレイヤー err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateGaussianNoiseLayer(layerDLLManager, layerDataManager, 0.0f, 0.1f), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 1層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.2f), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif #if 1 // Single // 2層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, 
CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.5f), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif #if 0 // Expand // 3層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, inputDataStruct, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif // 4層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, inputDataStruct, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif // 5層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_BATCHNORM err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, 
CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #if USE_DROPOUT err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateDropoutLayer(layerDLLManager, layerDataManager, inputDataStruct, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #endif #endif // Expand #elif 0 // MergeInput // 1層目のGUIDを記録 Gravisbell::GUID lastLayerGUID_A = lastLayerGUID; Gravisbell::GUID lastLayerGUID_B = lastLayerGUID; // 2層目A { err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_A, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_A, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_A, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // 2層目B { err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_B, &i_inputDataStruct, 1).ch, Vector3D<S32>(7,7,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(3,3,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateBatchNormalizationLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_B, &i_inputDataStruct, 1).ch)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_B, CreateDropoutLayer(layerDLLManager, layerDataManager, 0.5f)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // A,B結合層 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateMergeInputLayer(layerDLLManager, layerDataManager), lastLayerGUID_A, lastLayerGUID_B); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #elif 1 // ResNet // ショートカットレイヤーを保存する Gravisbell::GUID lastLayerGUID_shortCut = lastLayerGUID; // 2層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, 
pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 残差レイヤー err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateResidualLayer(layerDLLManager, layerDataManager), lastLayerGUID, lastLayerGUID_shortCut); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //// A,B結合層 //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateMergeInputLayer(layerDLLManager, inputDataStruct, inputDataStruct_shortCut), // lastLayerGUID, lastLayerGUID_shortCut); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateBatchNormalizationLayer(layerDLLManager, inputDataStruct)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateDropoutLayer(layerDLLManager, inputDataStruct, 0.5f)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #elif 0// UpSampling // 2層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateUpSamplingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), true)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateBatchNormalizationLayer(layerDLLManager, inputDataStruct)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; //err = AddLayerToNetworkLast( // *pNeuralNetwork, // lppLayerData, // inputDataStruct, lastLayerGUID, // CreateDropoutLayer(layerDLLManager, inputDataStruct, 
0.5f)); //if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #else #endif // 3層目 #if 1 // 全結合 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateFullyConnectLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).GetDataCount(), i_outputDataStruct.GetDataCount()), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"softmax_ALL_crossEntropy"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #elif 0 // GlobalAveragePooling // 畳み込み(出力:2ch) err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, inputDataStruct, Vector3D<S32>(5,5,1), outputDataStruct.GetDataCount(), Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0))); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // Pooling err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateGlobalAveragePoolingLayer(layerDLLManager, layerDataManager, inputDataStruct)); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 活性化 err = AddLayerToNetworkLast( *pNeuralNetwork, inputDataStruct, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, inputDataStruct, L"softmax_ALL_crossEntropy")); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; #else #endif // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"SGD"); pNeuralNetwork->SetOptimizerHyperParameter(L"LearnCoeff", 0.005f); // pNeuralNetwork->ChangeOptimizer(L"Adam"); return pNeuralNetwork; } /** ニューラルネットワーククラスを作成する */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver02(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { using namespace Gravisbell::Utility::NeuralNetworkLayer; Gravisbell::ErrorCode err; // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = CreateNeuralNetwork(layerDLLManager, layerDataManager, 1); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // 1層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 正規化レイヤー err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateBatchNormalizationAllLayer(layerDLLManager, layerDataManager), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 2層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, 
CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // チャンネル分割 Gravisbell::GUID lastLayerGUID_chA = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chB = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chC = lastLayerGUID; // A { // 分割 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chA, CreateChooseChannelLayer(layerDLLManager, layerDataManager, 0, 4), false ); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chA, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_chA, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chA, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // B { // 分割 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chB, CreateChooseChannelLayer(layerDLLManager, layerDataManager, 4, 4), false ); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chB, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_chB, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chB, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // C { // 分割 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chC, CreateChooseChannelLayer(layerDLLManager, layerDataManager, 8, 8), false ); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chC, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID_chC, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID_chC, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; } // マージ err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateMergeAverageLayer(layerDLLManager, layerDataManager, LayerMergeType::LYAERMERGETYPE_MIN), false, lastLayerGUID_chA, lastLayerGUID_chB, lastLayerGUID_chC); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 4層目 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateConvolutionLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).ch, Vector3D<S32>(5,5,1), 32, 
Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreatePoolingLayer(layerDLLManager, layerDataManager, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"ReLU"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 全結合 err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateFullyConnectLayer(layerDLLManager, layerDataManager, pNeuralNetwork->GetOutputDataStruct(lastLayerGUID, &i_inputDataStruct, 1).GetDataCount(), i_outputDataStruct.GetDataCount()), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; err = AddLayerToNetworkLast( *pNeuralNetwork, lastLayerGUID, CreateActivationLayer(layerDLLManager, layerDataManager, L"softmax_ALL_crossEntropy"), false); if(err != ErrorCode::ERROR_CODE_NONE) return NULL; // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 // pNeuralNetwork->ChangeOptimizer(L"SGD"); // pNeuralNetwork->SetOptimizerHyperParameter(L"LearnCoeff", 0.005f); pNeuralNetwork->ChangeOptimizer(L"Adam"); return pNeuralNetwork; } /** ニューラルネットワーククラスを作成する */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver03(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // 1層目 lastLayerGUID = pNetworkMaker->AddConvolutionLayer(lastLayerGUID, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); // 正規化レイヤー lastLayerGUID = pNetworkMaker->AddNormalizationScaleLayer(lastLayerGUID); // 2層目 lastLayerGUID = pNetworkMaker->AddConvolutionLayer(lastLayerGUID, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); // チャンネル分割 Gravisbell::GUID lastLayerGUID_chA = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chB = lastLayerGUID; Gravisbell::GUID lastLayerGUID_chC = lastLayerGUID; // A { // 分割 lastLayerGUID_chA = pNetworkMaker->AddChooseChannelLayer(lastLayerGUID_chA, 0, 4); lastLayerGUID_chA = pNetworkMaker->AddConvolutionLayer(lastLayerGUID_chA, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID_chA = pNetworkMaker->AddActivationLayer(lastLayerGUID_chA, 
L"ReLU"); } // B { // 分割 lastLayerGUID_chB = pNetworkMaker->AddChooseChannelLayer(lastLayerGUID_chB, 4, 4); lastLayerGUID_chB = pNetworkMaker->AddConvolutionLayer(lastLayerGUID_chB, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID_chB = pNetworkMaker->AddActivationLayer(lastLayerGUID_chB, L"ReLU"); } // C { // 分割 lastLayerGUID_chC = pNetworkMaker->AddChooseChannelLayer(lastLayerGUID_chC, 8, 8); lastLayerGUID_chC = pNetworkMaker->AddConvolutionLayer(lastLayerGUID_chC, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID_chC = pNetworkMaker->AddActivationLayer(lastLayerGUID_chC, L"ReLU"); } // マージ lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LayerMergeType::LYAERMERGETYPE_MIN, lastLayerGUID_chA, lastLayerGUID_chB, lastLayerGUID_chC); // 4層目 lastLayerGUID = pNetworkMaker->AddConvolutionLayer(lastLayerGUID, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"Default", L"he_normal"); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); // 全結合 lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"Default", L"glorot_normal"); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 // pNeuralNetwork->ChangeOptimizer(L"SGD"); // pNeuralNetwork->SetOptimizerHyperParameter(L"LearnCoeff", 0.005f); pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } /** ニューラルネットワーククラスを作成する */ Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver04(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); // lastLayerGUID = pNetworkMaker->AddReshapeMirrorXLayer(lastLayerGUID); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = 
pNetworkMaker->AddNeuralNetworkLayer_CBAD(lastLayerGUID, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver05(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 1, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddReshapeLayer(lastLayerGUID, IODataStruct(14, 56, 1, 1)); lastLayerGUID = pNetworkMaker->AddNormalizationScaleLayer(lastLayerGUID); lastLayerGUID = pNetworkMaker->AddReshapeSquareZeroSideLeftTopLayer(lastLayerGUID, 10, 6); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 4, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CAD(lastLayerGUID, Vector3D<S32>(5,5,1), 32, Vector3D<S32>(1,1,1), Vector3D<S32>(2,2,0), L"ReLU", 0.5f); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver06(const 
Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 1024, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 512, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 256, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 128, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver07(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddChooseBoxLayer(lastLayerGUID, Vector3D<S32>(4,4,0), Vector3D<S32>(20,20,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 256, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 128, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver08(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, 
Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1), Vector3D<S32>(0,0,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver09(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddValue2SignalArrayLayer(lastLayerGUID, 0.0f, 1.0, 8); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,1), Vector3D<S32>(0,0,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddDilatedConvolutionLayer(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,1), Vector3D<S32>(0,0,0)); lastLayerGUID = pNetworkMaker->AddActivationLayer(lastLayerGUID, L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver10(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, 
Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); Gravisbell::GUID bypassLayer = lastLayerGUID; { lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); } lastLayerGUID = pNetworkMaker->AddMergeAddLayer(Utility::NeuralNetworkLayer::LayerMergeType::LYAERMERGETYPE_LAYER0, sqrtf(0.5f), lastLayerGUID, bypassLayer); bypassLayer = lastLayerGUID; { lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); } lastLayerGUID = pNetworkMaker->AddMergeAddLayer(Utility::NeuralNetworkLayer::LayerMergeType::LYAERMERGETYPE_LAYER0, sqrtf(0.5f), lastLayerGUID, bypassLayer); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver11(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddChooseBoxLayer(lastLayerGUID, Vector3D<S32>(4,4,0), Vector3D<S32>(20,20,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 256, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 128, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 64, L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, 32, 
L"ReLU", L"WeightNormalization"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver12(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver13(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const 
IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); // lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); // lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver14(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { enum class NORMALIZATION_TYPE { NONE, BATCH, EXP }; NORMALIZATION_TYPE normalizationType = NORMALIZATION_TYPE::EXP; // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), 
pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddMergeMultiplyLayer( Gravisbell::Utility::NeuralNetworkLayer::LYAERMERGETYPE_LAYER0, pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"sigmoid"), pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 16, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"tanh")); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver15(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { enum class NORMALIZATION_TYPE { NONE, BATCH, EXP }; NORMALIZATION_TYPE normalizationType = NORMALIZATION_TYPE::BATCH; // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, 1024); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = 
pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, 512); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddFullyConnectLayer(lastLayerGUID, 256); switch(normalizationType) { case NORMALIZATION_TYPE::NONE: break; case NORMALIZATION_TYPE::BATCH: lastLayerGUID = pNetworkMaker->AddBatchNormalizationLayer(lastLayerGUID); break; case NORMALIZATION_TYPE::EXP: lastLayerGUID = pNetworkMaker->AddExponentialNormalizationLayer(lastLayerGUID, 64, 4); break; } lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy", L"WeightNormalization"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } Layer::Connect::ILayerConnectData* CreateNeuralNetwork_ver16(const Layer::NeuralNetwork::ILayerDLLManager& layerDLLManager, Layer::NeuralNetwork::ILayerDataManager& layerDataManager, const IODataStruct& i_inputDataStruct, const IODataStruct& i_outputDataStruct) { // ニューラルネットワーク作成クラスを作成 Gravisbell::Utility::NeuralNetworkLayer::INeuralNetworkMaker* pNetworkMaker = Gravisbell::Utility::NeuralNetworkLayer::CreateNeuralNetworkManaker(layerDLLManager, layerDataManager, &i_inputDataStruct, 1); // ニューラルネットワークを作成 Layer::Connect::ILayerConnectData* pNeuralNetwork = pNetworkMaker->GetNeuralNetworkLayer(); if(pNeuralNetwork == NULL) return NULL; // レイヤーを追加する if(pNeuralNetwork) { // 入力信号を直前レイヤーに設定 Gravisbell::GUID lastLayerGUID = pNeuralNetwork->GetInputGUID(0); lastLayerGUID = pNetworkMaker->AddPoolingLayer(lastLayerGUID, Vector3D<S32>(2,2,1), Vector3D<S32>(2,2,1)); lastLayerGUID = pNetworkMaker->AddLimitBackPropagationBoxLayer(lastLayerGUID, Vector3D<S32>(1,1,0), Vector3D<S32>(12,12,1)); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_CA(lastLayerGUID, Vector3D<S32>(3,3,1), 8, Vector3D<S32>(1,1,1), Vector3D<S32>(1,1,0), L"ReLU"); lastLayerGUID = pNetworkMaker->AddNeuralNetworkLayer_FA(lastLayerGUID, i_outputDataStruct.GetDataCount(), L"softmax_ALL_crossEntropy"); // 出力レイヤー設定 pNeuralNetwork->SetOutputLayerGUID(lastLayerGUID); } // 出力データ構造が正しいことを確認 if(pNeuralNetwork->GetOutputDataStruct(&i_inputDataStruct, 1) != i_outputDataStruct) { layerDataManager.EraseLayerByGUID(pNeuralNetwork->GetGUID()); return NULL; } // オプティマイザーの設定 pNeuralNetwork->ChangeOptimizer(L"Adam"); delete pNetworkMaker; return pNeuralNetwork; } /** ニューラルネットワークの学習とサンプル実行を同時実行 */ Gravisbell::ErrorCode LearnWithCalculateSampleError( Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkLearn, Layer::NeuralNetwork::INeuralNetwork* pNeuralNetworkSample, Layer::IOData::IIODataLayer* pTeachInputLayer, 
Layer::IOData::IIODataLayer* pTeachOutputLayer, Layer::IOData::IIODataLayer* pSampleInputLayer, Layer::IOData::IIODataLayer* pSampleOutputLayer, const U32 BATCH_SIZE, const U32 LEARN_TIMES) { Gravisbell::ErrorCode err; // 実行時設定 pNeuralNetworkLearn->SetRuntimeParameter(L"UseDropOut", true); pNeuralNetworkSample->SetRuntimeParameter(L"UseDropOut", false); pNeuralNetworkLearn->SetRuntimeParameter(L"GaussianNoise_Bias", 0.0f); pNeuralNetworkLearn->SetRuntimeParameter(L"GaussianNoise_Power", 0.0f); pNeuralNetworkSample->SetRuntimeParameter(L"GaussianNoise_Bias", 0.0f); pNeuralNetworkSample->SetRuntimeParameter(L"GaussianNoise_Power", 0.0f); // 事前処理を実行 err = pNeuralNetworkLearn->PreProcessLearn(BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pTeachInputLayer->PreProcessLearn(BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pTeachOutputLayer->PreProcessLearn(BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pNeuralNetworkSample->PreProcessCalculate(1); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pSampleInputLayer->PreProcessCalculate(1); if(err != ErrorCode::ERROR_CODE_NONE) return err; err = pSampleOutputLayer->PreProcessCalculate(1); if(err != ErrorCode::ERROR_CODE_NONE) return err; // バッチNo生成クラスを作成 Gravisbell::Common::IBatchDataNoListGenerator* pBatchDataNoListGenerator = Gravisbell::Common::CreateBatchDataNoListGenerator(); err = pBatchDataNoListGenerator->PreProcess(pTeachInputLayer->GetDataCount(), BATCH_SIZE); if(err != ErrorCode::ERROR_CODE_NONE) { delete pBatchDataNoListGenerator; return err; } std::vector<F32> lpDInputBuffer(pNeuralNetworkLearn->GetInputBufferCount(0) * BATCH_SIZE); std::vector<F32> lpOutputBuffer(pTeachOutputLayer->GetBufferCount() * BATCH_SIZE); std::vector<F32> lpTeachBuffer(pTeachOutputLayer->GetBufferCount() * BATCH_SIZE); // LSUV ( LAYER-SEQUENTIAL UNIT-VARIANCE INITIALIZATION ) を実行する { pNeuralNetworkLearn->SetRuntimeParameter(L"UpdateWeigthWithOutputVariance", true); pTeachInputLayer->PreProcessLoop(); pNeuralNetworkLearn->PreProcessLoop(); pTeachInputLayer->SetBatchDataNoList(pBatchDataNoListGenerator->GetBatchDataNoListByNum(0)); CONST_BATCH_BUFFER_POINTER lpInputBuffer[] = {pTeachInputLayer->GetOutputBuffer()}; pNeuralNetworkLearn->Calculate(lpInputBuffer); pNeuralNetworkLearn->SetRuntimeParameter(L"UpdateWeigthWithOutputVariance", false); } // 学習を実行 for(U32 learnTime=0; learnTime<LEARN_TIMES; learnTime++) { // printf("%5d回 ", learnTime); printf("%5d,", learnTime); U32 correctCount_learn = 0; // 正解数 U32 correctCount_sample = 0; // 正解数 // 学習 { // 学習ループ先頭処理 // pBatchDataNoListGenerator->PreProcessLearnLoop(); pTeachInputLayer->PreProcessLoop(); pTeachOutputLayer->PreProcessLoop(); pNeuralNetworkLearn->PreProcessLoop(); // 学習処理 // バッチ単位で処理 for(U32 batchNum=0; batchNum<pBatchDataNoListGenerator->GetBatchDataNoListCount(); batchNum++) { #if USE_GPU if(batchNum%10 == 0) #endif { printf(" L=%5.1f%%", (F32)batchNum * 100 / pBatchDataNoListGenerator->GetBatchDataNoListCount()); printf("\b\b\b\b\b\b\b\b\b"); } // データ切り替え pTeachInputLayer->SetBatchDataNoList(pBatchDataNoListGenerator->GetBatchDataNoListByNum(batchNum)); pTeachOutputLayer->SetBatchDataNoList(pBatchDataNoListGenerator->GetBatchDataNoListByNum(batchNum)); // 演算 CONST_BATCH_BUFFER_POINTER lpInputBuffer[] = {pTeachInputLayer->GetOutputBuffer()}; pNeuralNetworkLearn->Calculate(lpInputBuffer); // 誤差計算 // 教師信号との誤差計算 pTeachOutputLayer->CalculateLearnError(pNeuralNetworkLearn->GetOutputBuffer()); // 学習 BATCH_BUFFER_POINTER lppDInputBuffer[] = 
{&lpDInputBuffer[0]}; // pNeuralNetworkLearn->Training(lppDInputBuffer, pTeachOutputLayer->GetDInputBuffer()); pNeuralNetworkLearn->Training(NULL, pTeachOutputLayer->GetDInputBuffer()); // 正解率を算出する pTeachOutputLayer->GetOutputBuffer(&lpTeachBuffer[0]); pNeuralNetworkLearn->GetOutputBuffer(&lpOutputBuffer[0]); for(U32 batchDataNum=0; batchDataNum<pTeachOutputLayer->GetBatchSize(); batchDataNum++) { // 正解の番号を取得 U32 correctNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pTeachOutputLayer->GetBufferCount(); i++) { U32 bufferPos = batchDataNum * pTeachOutputLayer->GetBufferCount() + i; if(lpTeachBuffer[bufferPos] > curValue) { correctNo = i; curValue = lpTeachBuffer[bufferPos]; } } } // 出力された番号を取得 U32 outputNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pTeachOutputLayer->GetBufferCount(); i++) { U32 bufferPos = batchDataNum * pTeachOutputLayer->GetBufferCount() + i; if(lpOutputBuffer[bufferPos] > curValue) { outputNo = i; curValue = lpOutputBuffer[bufferPos]; } } } if(correctNo == outputNo) { correctCount_learn++; } } } } // サンプル実行 { // サンプル実行先頭処理 pSampleInputLayer->PreProcessLoop(); pSampleOutputLayer->PreProcessLoop(); pNeuralNetworkSample->PreProcessLoop(); // バッチ単位で処理 for(U32 dataNum=0; dataNum<pSampleInputLayer->GetDataCount(); dataNum++) { #if USE_GPU if(dataNum%10 == 0) #endif { printf(" T=%5.1f%%", (F32)dataNum * 100 / pSampleInputLayer->GetDataCount()); printf("\b\b\b\b\b\b\b\b\b"); } // データ切り替え pSampleInputLayer->SetBatchDataNoList(&dataNum); pSampleOutputLayer->SetBatchDataNoList(&dataNum); // 演算 CONST_BATCH_BUFFER_POINTER lpInputBuffer[] = {pSampleInputLayer->GetOutputBuffer()}; pNeuralNetworkSample->Calculate(lpInputBuffer); // 誤差計算 pSampleOutputLayer->CalculateLearnError(pNeuralNetworkSample->GetOutputBuffer()); // 正解の番号を取得 pSampleOutputLayer->GetOutputBuffer(&lpTeachBuffer[0]); pNeuralNetworkSample->GetOutputBuffer(&lpOutputBuffer[0]); { U32 correctNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pSampleOutputLayer->GetBufferCount(); i++) { if(lpTeachBuffer[i] > curValue) { correctNo = i; curValue = lpTeachBuffer[i]; } } } // 出力された番号を取得 U32 outputNo = 0; { F32 curValue = 0.0f; for(U32 i=0; i<pSampleOutputLayer->GetBufferCount(); i++) { if(lpOutputBuffer[i] > curValue) { outputNo = i; curValue = lpOutputBuffer[i]; } } } if(correctNo == outputNo) { correctCount_sample++; } } } } // 誤差表示 { F32 errorMax, errorAve, errorAve2, errorCrossEntoropy; pTeachOutputLayer->GetCalculateErrorValue(errorMax, errorAve, errorAve2, errorCrossEntoropy); // printf("学習:max=%.3f, ave=%.3f, ave2=%.3f, entropy=%.3f", errorMax, errorAve, errorAve2, errorCrossEntoropy); printf("%.3f,%.3f,%.3f,%.3f,", errorMax, errorAve2, errorCrossEntoropy, (F32)correctCount_learn / (pBatchDataNoListGenerator->GetBatchDataNoListCount() * BATCH_SIZE)); } // printf(" : "); { F32 errorMax, errorAve, errorAve2, errorCrossEntoropy; pSampleOutputLayer->GetCalculateErrorValue(errorMax, errorAve, errorAve2, errorCrossEntoropy); // printf("実行:max=%.3f, ave=%.3f, ave2=%.3f, entropy=%.3f", errorMax, errorAve, errorAve2, errorCrossEntoropy); printf("%.3f,%.3f,%.3f,%.3f", errorMax, errorAve2, errorCrossEntoropy, (F32)correctCount_sample / pSampleInputLayer->GetDataCount()); } printf("\n"); } // メモリ開放 delete pBatchDataNoListGenerator; return ErrorCode::ERROR_CODE_NONE; }
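The accuracy bookkeeping in LearnWithCalculateSampleError above repeats the same argmax scan four times (teacher buffer and output buffer, in both the learning pass and the sample pass). Below is a minimal refactoring sketch of that scan using plain C++ types (float in place of Gravisbell's F32); FindMaxIndex is a hypothetical helper name, not part of the Gravisbell API.

/* Hypothetical helper: index of the largest value in one batch entry's buffer.
   Mirrors the scans over lpTeachBuffer / lpOutputBuffer in the loop above. */
static unsigned int FindMaxIndex(const float* buffer, unsigned int count)
{
    unsigned int maxNo = 0;
    float maxValue = buffer[0];
    for(unsigned int i = 1; i < count; i++)
    {
        if(buffer[i] > maxValue)
        {
            maxNo = i;
            maxValue = buffer[i];
        }
    }
    return maxNo;
}

/* Usage sketch inside the batch loop (names taken from the code above):
     U32 bufferCount = pTeachOutputLayer->GetBufferCount();
     U32 correctNo = FindMaxIndex(&lpTeachBuffer[batchDataNum * bufferCount], bufferCount);
     U32 outputNo  = FindMaxIndex(&lpOutputBuffer[batchDataNum * bufferCount], bufferCount);
     if(correctNo == outputNo) correctCount_learn++;                                        */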
54b9f144575d4741e27b3755144fd3c76f242552.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #define NB 64 /* Matrix is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConj(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConj(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The m by m matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zsymmetrize_q( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex *dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m) ) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( zsymmetrize_upper), dim3(grid), dim3(threads), 0, queue , m, dA, ldda ); } else { hipLaunchKernelGGL(( zsymmetrize_lower), dim3(grid), dim3(threads), 0, queue , m, dA, ldda ); } } /** @see magmablas_zsymmetrize_q @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zsymmetrize( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex *dA, magma_int_t ldda ) { magmablas_zsymmetrize_q( uplo, m, dA, ldda, magma_stream ); }
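As a semantic reference (not a MAGMA API), the kernel pair in the file above amounts to the following CPU loop over an m x m column-major matrix with leading dimension ldda; std::complex<double> stands in for magmaDoubleComplex and std::conj for cuConj.

#include <complex>

/* CPU sketch of zsymmetrize_lower: for every (i, j) with j < i, overwrite the
   upper-triangle entry A(j, i) with the conjugate of the lower-triangle entry
   A(i, j). The diagonal is never touched. zsymmetrize_upper is the same loop
   with the assignment reversed. */
static void symmetrize_lower_reference(int m, std::complex<double>* A, int ldda)
{
    for (int i = 0; i < m; ++i)          /* row i is handled by one GPU thread   */
        for (int j = 0; j < i; ++j)      /* columns strictly below the diagonal  */
            A[j + i * ldda] = std::conj(A[i + j * ldda]);   /* upper := conj(lower) */
}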
54b9f144575d4741e27b3755144fd3c76f242552.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #define NB 64 /* Matrix is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zsymmetrize_lower( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConj(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void zsymmetrize_upper( int m, magmaDoubleComplex *dA, int ldda ) { // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaDoubleComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaDoubleComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConj(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /** Purpose ------- ZSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in,out] dA COMPLEX_16 array, dimension (LDDA,N) The m by m matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zsymmetrize_q( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex *dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m) ) info = -4; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB ); if ( uplo == MagmaUpper ) { zsymmetrize_upper<<< grid, threads, 0, queue >>>( m, dA, ldda ); } else { zsymmetrize_lower<<< grid, threads, 0, queue >>>( m, dA, ldda ); } } /** @see magmablas_zsymmetrize_q @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zsymmetrize( magma_uplo_t uplo, magma_int_t m, magmaDoubleComplex *dA, magma_int_t ldda ) { magmablas_zsymmetrize_q( uplo, m, dA, ldda, magma_stream ); }
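Apart from the hipify banner and the hip_runtime.h include, the .hip and .cu versions of this file differ only in the kernel-launch syntax: hipify rewrites CUDA's triple-chevron launch into a hipLaunchKernelGGL call whose third and fourth arguments are the dynamic shared-memory size and the stream, followed by the kernel arguments. A minimal illustration with a dummy kernel named scale (not from MAGMA; d_x is assumed to be a device buffer of n doubles):

__global__ void scale( int n, double *x, double a )
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if ( i < n ) x[i] *= a;
}

/* CUDA launch, as written in the .cu file above:
       scale<<< grid, threads, 0, queue >>>( n, d_x, 2.0 );
   Equivalent HIP launch, as emitted by hipify in the .hip file above:
       hipLaunchKernelGGL(( scale), dim3(grid), dim3(threads), 0, queue, n, d_x, 2.0 );   */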
b4c6705714596edd6fdba8f5902e12087a00f6e8.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::GemmSplitKParallel< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, hipStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
b4c6705714596edd6fdba8f5902e12087a00f6e8.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 256, 8>; using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::GemmSplitKParallel< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
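The instantiation above uses cutlass::gemm::device::GemmSplitKParallel, which cuts the K dimension of the product into split_k_slices independent partial GEMMs that are accumulated in a workspace buffer and then reduced, hence the int* workspace and split_k_slices arguments in the wrapper signature. Below is a scalar CPU sketch of the split-K idea, not the CUTLASS API; everything is column-major here for simplicity (the instantiation above actually emits a row-major C).

#include <cstddef>
#include <vector>

/* C(m x n) = A(m x k) * B(k x n). The K dimension is cut into `slices` chunks;
   each chunk produces an independent partial product (these are what run in
   parallel on the GPU), and a final pass reduces the partials into C. */
static void gemm_split_k_reference(int m, int n, int k, int slices,
                                   const float* A, int lda,
                                   const float* B, int ldb,
                                   float* C, int ldc)
{
    std::vector<float> partial(static_cast<std::size_t>(slices) * m * n, 0.0f);  /* the "workspace" */
    int chunk = (k + slices - 1) / slices;

    for (int s = 0; s < slices; ++s) {
        int k0 = s * chunk;
        int k1 = (k0 + chunk < k) ? (k0 + chunk) : k;
        float* P = partial.data() + static_cast<std::size_t>(s) * m * n;
        for (int j = 0; j < n; ++j)
            for (int kk = k0; kk < k1; ++kk)
                for (int i = 0; i < m; ++i)
                    P[i + j * m] += A[i + kk * lda] * B[kk + j * ldb];
    }

    for (int j = 0; j < n; ++j)            /* reduction of the partial products */
        for (int i = 0; i < m; ++i) {
            float acc = 0.0f;
            for (int s = 0; s < slices; ++s)
                acc += partial[static_cast<std::size_t>(s) * m * n + i + j * m];
            C[i + j * ldc] = acc;
        }
}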
4f020f1887f1b1f39be51e6a73a69c1b1e405ca4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdint.h> #include <time.h> #define PI 3.14159265 //#define GPU_COMPUTING __global__ void conv(float *tab, int N, float *filter, int s, float *output); void box_filter(float *filter, int size); void gaussian_filter(float *filter, int size); void conv_GPU(float *tab, int N, float *filter, int s, float *tab_filtered, int N_threads); void conv_CPU(float *tab, int N, float *filter, int s, float *tab_filtered); int main(int argc, char const *argv[]) { FILE * fp; int32_t N = (int32_t) atoi(argv[1]); int32_t N_threads = (int32_t)atoi(argv[2]); int32_t s = (int32_t) atoi(argv[3]); float freq1 = atof(argv[4]); float freq2 = atof(argv[5]); float phi = 1.0; float *tab_CPU = (float *) malloc(N*sizeof(float)); float *tab_CPU_box = (float *) malloc(N*sizeof(float)); float *tab_CPU_gaussian = (float *) malloc(N*sizeof(float)); float filter[2*s+1]; for (int i=0; i<N; i++){ tab_CPU[i] = sin(2*PI*i/freq1) + sin(2*PI*i/freq2+phi); } for (int i=1; i<31; i++){ s = (int32_t) 20*i; printf("Iteration %d, ", i); gaussian_filter(filter, s); printf("GPU_gaussian: "); conv_GPU(tab_CPU, N, filter, s, tab_CPU_gaussian, N_threads); printf(", CPU_gaussian: "); conv_CPU(tab_CPU, N, filter, s, tab_CPU_gaussian); box_filter(filter, s); printf(", GPU_box: "); conv_GPU(tab_CPU, N, filter, s, tab_CPU_box, N_threads); printf(", CPU_box: "); conv_CPU(tab_CPU, N, filter, s, tab_CPU_box); printf("\n"); } // for (int i=0; i<2*s+1; i++){ // printf("%f ", filter[i]); // } fp = fopen ("signal.data", "w+"); for (int i=0; i<200; i++){ fprintf(fp, "%f %f %f\n", tab_CPU[i], tab_CPU_box[i], tab_CPU_gaussian[i]); } fclose(fp); return 0; } __global__ void conv(float *tab, int N, float *filter, int s, float *output){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<=N){ output[idx] = filter[s]*tab[idx]; for (int i=1; i<s+1; i++){ if (idx-i >= 0) output[idx] += filter[s+i]*tab[idx-i]; else output[idx] += filter[s+i]*tab[idx-i+N]; if(idx+i < N) output[idx] += filter[s-i]*tab[idx+i]; else output[idx] += filter[s+i]*tab[idx+i-N]; } } } void box_filter(float *filter, int size) { for (int i=0; i<2*size+1; i++){ filter[i] = 1/(float)(2*size+1); } } void gaussian_filter(float *filter, int size) { float s = (float) (2*size+1); float sum = 0; for (int i=0; i<2*size+1; i++){ filter[i] = exp(-(i-size)*(i-size) / (2*s*s)); sum += filter[i]; } for (int i=0; i<2*size+1; i++){ filter[i] /= sum; } } void conv_GPU(float *tab, int N, float *filter, int s, float *tab_filtered, int N_threads) { clock_t start, finish; double duration; start = clock(); float *tab_GPU; float *output_GPU; float *filter_GPU; // Allocate vector in device memory hipMalloc(&tab_GPU, N * sizeof(float)); hipMalloc(&output_GPU, N * sizeof(float)); hipMalloc(&filter_GPU, (2*s+1) * sizeof(float)); // Copy vectors from host memory to device memory hipMemcpy(tab_GPU, tab, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(output_GPU, tab, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(filter_GPU, filter, (2*s+1) * sizeof(float), hipMemcpyHostToDevice); int threadsPerBlock = N_threads; int blocksPerGrid = (int) ceil(N / (float)threadsPerBlock); hipLaunchKernelGGL(( conv), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, tab_GPU, N, filter_GPU, s, output_GPU); hipMemcpy(tab_filtered, output_GPU, N * sizeof(float), hipMemcpyDeviceToHost); hipFree(tab_GPU); hipFree(filter_GPU); hipFree(output_GPU); finish = clock(); duration = 
(double)(finish - start) / CLOCKS_PER_SEC; printf("%f",duration); } void conv_CPU(float *tab, int N, float *filter, int s, float *tab_filtered){ clock_t start, finish; double duration; start = clock(); double t = (double) time(NULL); for (int idx=0; idx<N; idx++){ tab_filtered[idx] = filter[s]*tab[idx]; for (int i=1; i<s+1; i++){ tab_filtered[idx] += filter[s+i]*tab[(idx-i)%N]; tab_filtered[idx] += filter[s-i]*tab[(idx+i)%N]; } } finish = clock(); duration = (double)(finish - start) / CLOCKS_PER_SEC; printf("%f",duration); }
4f020f1887f1b1f39be51e6a73a69c1b1e405ca4.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdint.h> #include <time.h> #define PI 3.14159265 //#define GPU_COMPUTING __global__ void conv(float *tab, int N, float *filter, int s, float *output); void box_filter(float *filter, int size); void gaussian_filter(float *filter, int size); void conv_GPU(float *tab, int N, float *filter, int s, float *tab_filtered, int N_threads); void conv_CPU(float *tab, int N, float *filter, int s, float *tab_filtered); int main(int argc, char const *argv[]) { FILE * fp; int32_t N = (int32_t) atoi(argv[1]); int32_t N_threads = (int32_t)atoi(argv[2]); int32_t s = (int32_t) atoi(argv[3]); float freq1 = atof(argv[4]); float freq2 = atof(argv[5]); float phi = 1.0; float *tab_CPU = (float *) malloc(N*sizeof(float)); float *tab_CPU_box = (float *) malloc(N*sizeof(float)); float *tab_CPU_gaussian = (float *) malloc(N*sizeof(float)); float filter[2*s+1]; for (int i=0; i<N; i++){ tab_CPU[i] = sin(2*PI*i/freq1) + sin(2*PI*i/freq2+phi); } for (int i=1; i<31; i++){ s = (int32_t) 20*i; printf("Iteration %d, ", i); gaussian_filter(filter, s); printf("GPU_gaussian: "); conv_GPU(tab_CPU, N, filter, s, tab_CPU_gaussian, N_threads); printf(", CPU_gaussian: "); conv_CPU(tab_CPU, N, filter, s, tab_CPU_gaussian); box_filter(filter, s); printf(", GPU_box: "); conv_GPU(tab_CPU, N, filter, s, tab_CPU_box, N_threads); printf(", CPU_box: "); conv_CPU(tab_CPU, N, filter, s, tab_CPU_box); printf("\n"); } // for (int i=0; i<2*s+1; i++){ // printf("%f ", filter[i]); // } fp = fopen ("signal.data", "w+"); for (int i=0; i<200; i++){ fprintf(fp, "%f %f %f\n", tab_CPU[i], tab_CPU_box[i], tab_CPU_gaussian[i]); } fclose(fp); return 0; } __global__ void conv(float *tab, int N, float *filter, int s, float *output){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx<=N){ output[idx] = filter[s]*tab[idx]; for (int i=1; i<s+1; i++){ if (idx-i >= 0) output[idx] += filter[s+i]*tab[idx-i]; else output[idx] += filter[s+i]*tab[idx-i+N]; if(idx+i < N) output[idx] += filter[s-i]*tab[idx+i]; else output[idx] += filter[s+i]*tab[idx+i-N]; } } } void box_filter(float *filter, int size) { for (int i=0; i<2*size+1; i++){ filter[i] = 1/(float)(2*size+1); } } void gaussian_filter(float *filter, int size) { float s = (float) (2*size+1); float sum = 0; for (int i=0; i<2*size+1; i++){ filter[i] = exp(-(i-size)*(i-size) / (2*s*s)); sum += filter[i]; } for (int i=0; i<2*size+1; i++){ filter[i] /= sum; } } void conv_GPU(float *tab, int N, float *filter, int s, float *tab_filtered, int N_threads) { clock_t start, finish; double duration; start = clock(); float *tab_GPU; float *output_GPU; float *filter_GPU; // Allocate vector in device memory cudaMalloc(&tab_GPU, N * sizeof(float)); cudaMalloc(&output_GPU, N * sizeof(float)); cudaMalloc(&filter_GPU, (2*s+1) * sizeof(float)); // Copy vectors from host memory to device memory cudaMemcpy(tab_GPU, tab, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(output_GPU, tab, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(filter_GPU, filter, (2*s+1) * sizeof(float), cudaMemcpyHostToDevice); int threadsPerBlock = N_threads; int blocksPerGrid = (int) ceil(N / (float)threadsPerBlock); conv<<<blocksPerGrid,threadsPerBlock>>>(tab_GPU, N, filter_GPU, s, output_GPU); cudaMemcpy(tab_filtered, output_GPU, N * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(tab_GPU); cudaFree(filter_GPU); cudaFree(output_GPU); finish = clock(); duration = (double)(finish - start) / CLOCKS_PER_SEC; printf("%f",duration); } void conv_CPU(float *tab, int N, float 
*filter, int s, float *tab_filtered){ clock_t start, finish; double duration; start = clock(); double t = (double) time(NULL); for (int idx=0; idx<N; idx++){ tab_filtered[idx] = filter[s]*tab[idx]; for (int i=1; i<s+1; i++){ tab_filtered[idx] += filter[s+i]*tab[(idx-i)%N]; tab_filtered[idx] += filter[s-i]*tab[(idx+i)%N]; } } finish = clock(); duration = (double)(finish - start) / CLOCKS_PER_SEC; printf("%f",duration); }
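The conv kernel in the pair above reads out of bounds when idx == N (the guard is idx <= N), reuses filter[s+i] instead of filter[s-i] in the right-hand wrap-around branch, and the CPU reference relies on (idx-i)%N, which is negative in C whenever idx < i. A minimal corrected sketch of the same circular convolution follows; the name conv_checked and the const qualifiers are additions, and it assumes s < N as in the driver above.

__global__ void conv_checked(const float *tab, int N, const float *filter, int s, float *output)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;                        // idx == N would index past the buffers
    float acc = filter[s] * tab[idx];
    for (int i = 1; i <= s; i++) {
        int left  = (idx - i + N) % N;           // circular index, non-negative because s < N
        int right = (idx + i) % N;
        acc += filter[s + i] * tab[left];
        acc += filter[s - i] * tab[right];       // mirrored coefficient on the right side
    }
    output[idx] = acc;
}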
0e1ea03819f153e9aaf7e9282e62f78b991df2f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pytorch_cuda_helper.hpp" #include "carafe_naive_cuda_kernel.cuh" int CARAFENAIVEForwardCUDAKernelLauncher(const Tensor features, const Tensor masks, Tensor output, const int kernel_size, const int group_size, const int scale_factor) { int output_size = output.numel(); int channels = output.size(1); int height = output.size(2); int width = output.size(3); at::hip::HIPGuardMasqueradingAsCUDA device_guard(features.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "CARAFENAIVEForward", ([&] { hipLaunchKernelGGL(( carafe_naive_forward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, features.data_ptr<scalar_t>(), masks.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), kernel_size, group_size, scale_factor, channels, height, width); })); AT_CUDA_CHECK(hipGetLastError()); return 0; } int CARAFENAIVEBackwardCUDAKernelLauncher( const Tensor top_grad, const Tensor features, const Tensor masks, Tensor bottom_grad, Tensor mask_grad, const int kernel_size, const int group_size, const int scale_factor) { int output_size = top_grad.numel(); int channels = top_grad.size(1); int height = top_grad.size(2); int width = top_grad.size(3); at::hip::HIPGuardMasqueradingAsCUDA device_guard(top_grad.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "CARAFENAIVEBackward", ([&] { hipLaunchKernelGGL(( carafe_naive_backward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, top_grad.data_ptr<scalar_t>(), features.data_ptr<scalar_t>(), masks.data_ptr<scalar_t>(), bottom_grad.data_ptr<scalar_t>(), mask_grad.data_ptr<scalar_t>(), kernel_size, group_size, scale_factor, channels, height, width); })); AT_CUDA_CHECK(hipGetLastError()); return 0; }
0e1ea03819f153e9aaf7e9282e62f78b991df2f0.cu
#include "pytorch_cuda_helper.hpp" #include "carafe_naive_cuda_kernel.cuh" int CARAFENAIVEForwardCUDAKernelLauncher(const Tensor features, const Tensor masks, Tensor output, const int kernel_size, const int group_size, const int scale_factor) { int output_size = output.numel(); int channels = output.size(1); int height = output.size(2); int width = output.size(3); at::cuda::CUDAGuard device_guard(features.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "CARAFENAIVEForward", ([&] { carafe_naive_forward_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, features.data_ptr<scalar_t>(), masks.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), kernel_size, group_size, scale_factor, channels, height, width); })); AT_CUDA_CHECK(cudaGetLastError()); return 0; } int CARAFENAIVEBackwardCUDAKernelLauncher( const Tensor top_grad, const Tensor features, const Tensor masks, Tensor bottom_grad, Tensor mask_grad, const int kernel_size, const int group_size, const int scale_factor) { int output_size = top_grad.numel(); int channels = top_grad.size(1); int height = top_grad.size(2); int width = top_grad.size(3); at::cuda::CUDAGuard device_guard(top_grad.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "CARAFENAIVEBackward", ([&] { carafe_naive_backward_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, top_grad.data_ptr<scalar_t>(), features.data_ptr<scalar_t>(), masks.data_ptr<scalar_t>(), bottom_grad.data_ptr<scalar_t>(), mask_grad.data_ptr<scalar_t>(), kernel_size, group_size, scale_factor, channels, height, width); })); AT_CUDA_CHECK(cudaGetLastError()); return 0; }
95267279a29e7c8c09a7a2dbba6b8c1600c553de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void warmup(float *input, float *output) { const int i = threadIdx.x + blockIdx.x * blockDim.x; output[i] = input[i] * input[i]; }
95267279a29e7c8c09a7a2dbba6b8c1600c553de.cu
#include "includes.h" __global__ void warmup(float *input, float *output) { const int i = threadIdx.x + blockIdx.x * blockDim.x; output[i] = input[i] * input[i]; }
b9322fbed58e112f8a9f89dc03ad51dadb0c34ef.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <cstring> #include <hip/hip_runtime.h> #include <map> #include <cassert> #include <cstring> #include "bitonic.hxx" #define start_index_s0 2*i*batch_size #define start_index_s1 2*i*batch_size+batch_size #define start_index_s2 2*i*batch_size-batch_size #define merge_index_1 2*(i-1)*batch_size #define merge_index_2 2*(i-1)*batch_size+batch_size void PairMerge(uint64_t *key_array_1, uint64_t *key_array_2, uint64_t batch_size, int nthreads); void BitonicSort(uint64_t *h_key_array, uint64_t *d_key_array[2], uint64_t number_of_elements, uint64_t batch_size, int nthreads) { int number_of_batches = number_of_elements / batch_size; uint64_t *pinned_M[2]; hipMalloc( (void**)&d_key_array[0], batch_size * sizeof(uint64_t) ); hipMalloc( (void**)&d_key_array[1], batch_size * sizeof(uint64_t) ); hipHostMalloc( (void**)&pinned_M[0], batch_size * sizeof(uint64_t), hipHostMallocDefault ); hipHostMalloc( (void**)&pinned_M[1], batch_size * sizeof(uint64_t), hipHostMallocDefault ); hipStream_t streams[2]; for (int s = 0; s < 2; s++) { hipStreamCreate(&streams[s]); } for (int i = 0; i < number_of_batches / 2; i++) { for (int s = 0; s < 2; s++) { if (i == 0 && s == 0) { std::memcpy(pinned_M[0], &h_key_array[start_index_s0], batch_size*sizeof(uint64_t)); hipMemcpyAsync(d_key_array[0], pinned_M[0], batch_size*sizeof(uint64_t), hipMemcpyHostToDevice, streams[0]); hipDeviceSynchronize(); //thrust::sort(thrust::hip::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size); bitonicSort<uint64_t, cmp>(d_key_array[0], batch_size, 256, 32, streams[0]); hipDeviceSynchronize(); } else if (i > 0 && s == 0) { std::memcpy(pinned_M[0], &h_key_array[start_index_s0], batch_size*sizeof(uint64_t)); hipMemcpyAsync(d_key_array[0], pinned_M[0], batch_size*sizeof(uint64_t), hipMemcpyHostToDevice, streams[0]); hipMemcpyAsync(pinned_M[1], d_key_array[1], batch_size*sizeof(uint64_t), hipMemcpyDeviceToHost, streams[1]); hipDeviceSynchronize(); std::memcpy(&h_key_array[start_index_s2], pinned_M[1], batch_size*sizeof(uint64_t)); //thrust::sort(thrust::hip::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size); bitonicSort<uint64_t, cmp>(d_key_array[0], batch_size, 256, 32, streams[0]); PairMerge(&h_key_array[merge_index_1], &h_key_array[merge_index_2], batch_size, nthreads); hipDeviceSynchronize(); } else if (s == 1) { std::memcpy(pinned_M[1], &h_key_array[start_index_s1], batch_size*sizeof(uint64_t)); hipMemcpyAsync(d_key_array[1], pinned_M[1], batch_size*sizeof(uint64_t), hipMemcpyHostToDevice, streams[1]); hipMemcpyAsync(pinned_M[0], d_key_array[0], batch_size*sizeof(uint64_t), hipMemcpyDeviceToHost, streams[0]); hipDeviceSynchronize(); std::memcpy(&h_key_array[start_index_s0], pinned_M[0], batch_size*sizeof(uint64_t)); //thrust::sort(thrust::hip::par(alloc).on(streams[1]), th_key_array[1], th_key_array[1]+batch_size); bitonicSort<uint64_t, cmp>(d_key_array[1], batch_size, 256, 32, streams[1]); hipDeviceSynchronize(); if (i == (number_of_batches / 2) - 1) { hipMemcpyAsync(pinned_M[1], d_key_array[1], batch_size*sizeof(uint64_t), hipMemcpyDeviceToHost, streams[1]); hipDeviceSynchronize(); std::memcpy(&h_key_array[start_index_s1], pinned_M[1], batch_size*sizeof(uint64_t)); } } } } for (int s = 0; s < 2; s++) { hipStreamDestroy(streams[s]); } return; }
b9322fbed58e112f8a9f89dc03ad51dadb0c34ef.cu
#include <stdio.h> #include <cstring> #include <cuda_runtime.h> #include <map> #include <cassert> #include <cstring> #include "bitonic.hxx" #define start_index_s0 2*i*batch_size #define start_index_s1 2*i*batch_size+batch_size #define start_index_s2 2*i*batch_size-batch_size #define merge_index_1 2*(i-1)*batch_size #define merge_index_2 2*(i-1)*batch_size+batch_size void PairMerge(uint64_t *key_array_1, uint64_t *key_array_2, uint64_t batch_size, int nthreads); void BitonicSort(uint64_t *h_key_array, uint64_t *d_key_array[2], uint64_t number_of_elements, uint64_t batch_size, int nthreads) { int number_of_batches = number_of_elements / batch_size; uint64_t *pinned_M[2]; cudaMalloc( (void**)&d_key_array[0], batch_size * sizeof(uint64_t) ); cudaMalloc( (void**)&d_key_array[1], batch_size * sizeof(uint64_t) ); cudaHostAlloc( (void**)&pinned_M[0], batch_size * sizeof(uint64_t), cudaHostAllocDefault ); cudaHostAlloc( (void**)&pinned_M[1], batch_size * sizeof(uint64_t), cudaHostAllocDefault ); cudaStream_t streams[2]; for (int s = 0; s < 2; s++) { cudaStreamCreate(&streams[s]); } for (int i = 0; i < number_of_batches / 2; i++) { for (int s = 0; s < 2; s++) { if (i == 0 && s == 0) { std::memcpy(pinned_M[0], &h_key_array[start_index_s0], batch_size*sizeof(uint64_t)); cudaMemcpyAsync(d_key_array[0], pinned_M[0], batch_size*sizeof(uint64_t), cudaMemcpyHostToDevice, streams[0]); cudaDeviceSynchronize(); //thrust::sort(thrust::cuda::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size); bitonicSort<uint64_t, cmp>(d_key_array[0], batch_size, 256, 32, streams[0]); cudaDeviceSynchronize(); } else if (i > 0 && s == 0) { std::memcpy(pinned_M[0], &h_key_array[start_index_s0], batch_size*sizeof(uint64_t)); cudaMemcpyAsync(d_key_array[0], pinned_M[0], batch_size*sizeof(uint64_t), cudaMemcpyHostToDevice, streams[0]); cudaMemcpyAsync(pinned_M[1], d_key_array[1], batch_size*sizeof(uint64_t), cudaMemcpyDeviceToHost, streams[1]); cudaDeviceSynchronize(); std::memcpy(&h_key_array[start_index_s2], pinned_M[1], batch_size*sizeof(uint64_t)); //thrust::sort(thrust::cuda::par(alloc).on(streams[0]), th_key_array[0], th_key_array[0]+batch_size); bitonicSort<uint64_t, cmp>(d_key_array[0], batch_size, 256, 32, streams[0]); PairMerge(&h_key_array[merge_index_1], &h_key_array[merge_index_2], batch_size, nthreads); cudaDeviceSynchronize(); } else if (s == 1) { std::memcpy(pinned_M[1], &h_key_array[start_index_s1], batch_size*sizeof(uint64_t)); cudaMemcpyAsync(d_key_array[1], pinned_M[1], batch_size*sizeof(uint64_t), cudaMemcpyHostToDevice, streams[1]); cudaMemcpyAsync(pinned_M[0], d_key_array[0], batch_size*sizeof(uint64_t), cudaMemcpyDeviceToHost, streams[0]); cudaDeviceSynchronize(); std::memcpy(&h_key_array[start_index_s0], pinned_M[0], batch_size*sizeof(uint64_t)); //thrust::sort(thrust::cuda::par(alloc).on(streams[1]), th_key_array[1], th_key_array[1]+batch_size); bitonicSort<uint64_t, cmp>(d_key_array[1], batch_size, 256, 32, streams[1]); cudaDeviceSynchronize(); if (i == (number_of_batches / 2) - 1) { cudaMemcpyAsync(pinned_M[1], d_key_array[1], batch_size*sizeof(uint64_t), cudaMemcpyDeviceToHost, streams[1]); cudaDeviceSynchronize(); std::memcpy(&h_key_array[start_index_s1], pinned_M[1], batch_size*sizeof(uint64_t)); } } } } for (int s = 0; s < 2; s++) { cudaStreamDestroy(streams[s]); } return; }
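The routine above stages batches through two streams, but every async copy is followed by a device-wide synchronize, which serializes the streams. A bare sketch of the intended ping-pong pattern, with per-stream synchronization and the sort left as a placeholder; it assumes h is pinned (cudaHostAlloc), as it is in the routine above:

#include <cuda_runtime.h>
#include <cstdint>

void pipeline(uint64_t *h, uint64_t *d[2], uint64_t batch, int batches)
{
    cudaStream_t s[2];
    for (int i = 0; i < 2; i++) cudaStreamCreate(&s[i]);

    for (int b = 0; b < batches; b++) {
        int cur = b & 1;                               // ping-pong between the two device buffers
        cudaMemcpyAsync(d[cur], h + (uint64_t)b * batch, batch * sizeof(uint64_t),
                        cudaMemcpyHostToDevice, s[cur]);
        // ... enqueue the per-batch sort on s[cur] here (e.g. bitonicSort) ...
        cudaMemcpyAsync(h + (uint64_t)b * batch, d[cur], batch * sizeof(uint64_t),
                        cudaMemcpyDeviceToHost, s[cur]);
        if (b > 0) cudaStreamSynchronize(s[1 - cur]);  // drain only the older in-flight batch
    }
    for (int i = 0; i < 2; i++) {
        cudaStreamSynchronize(s[i]);
        cudaStreamDestroy(s[i]);
    }
}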
874b08ccf54cbc4a9dd1df9d07338074cb012027.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Connected Components in the GPU * Paper Source: An Optimized Union-Find Algorithm for Connected * Components Labeling Using GPUs * Adapted from: https://github.com/victormatheus/CCL-GPU * Modified by: Imanol Luengo <[email protected]> */ typedef unsigned int uint32; #define MAX_UINT32 0xFFFFFFFF __device__ uint32 find(uint32* buf, uint32 x) { uint32 p = x; while ( x != buf[x] ) { x = buf[x]; } buf[p] = x; // fast linking return x; } __device__ void findAndUnion(uint32* buf, uint32 g1, uint32 g2) { bool done; uint32 old; do { g1 = find(buf, g1); g2 = find(buf, g2); if (g1 < g2) { old = atomicMin(&buf[g2], g1); done = (old == g2); g2 = old; } else if (g2 < g1) { old = atomicMin(&buf[g1], g2); done = (old == g1); g1 = old; } else { done = true; } } while ( !done ); } __global__ void uf_local(const uint32* in, uint32* out, int3 shape) { int3 p; p.z = blockIdx.z * blockDim.z + threadIdx.z; p.y = blockIdx.y * blockDim.y + threadIdx.y; p.x = blockIdx.x * blockDim.x + threadIdx.x; long image_plane = (shape.y * shape.x); long block_plane = (blockDim.y * blockDim.x); uint32 gidx = p.z * image_plane + p.y * shape.x + p.x; uint32 bidx = threadIdx.z * block_plane + \ threadIdx.y * blockDim.x + \ threadIdx.x; long bsize = blockDim.z * blockDim.y * blockDim.x; extern __shared__ uint32 s_buffer[]; bool in_limits = p.z < shape.z && p.y < shape.y && p.x < shape.x; s_buffer[bidx] = bidx; s_buffer[bsize + bidx] = in_limits? in[p.z * image_plane + p.y * shape.x + p.x] : 0; __syncthreads(); if ( !in_limits ) {return;} uint32 v = s_buffer[bsize + bidx]; if ( threadIdx.x > 0 && s_buffer[bsize + bidx - 1] == v ) { findAndUnion(s_buffer, bidx, bidx - 1); } __syncthreads(); if ( threadIdx.y > 0 && s_buffer[bsize + bidx - blockDim.x] == v ) { findAndUnion(s_buffer, bidx, bidx - blockDim.x); } __syncthreads(); if ( threadIdx.z > 0 && s_buffer[bsize + bidx - block_plane] == v ) { findAndUnion(s_buffer, bidx, bidx - block_plane); } __syncthreads(); uint32 f = find(s_buffer, bidx); uint32 aux = f % block_plane; uint32 fz = f / block_plane; uint32 fy = aux / blockDim.x; uint32 fx = aux % blockDim.x; out[gidx] = (blockIdx.z * blockDim.z + fz) * image_plane + \ (blockIdx.y * blockDim.y + fy) * shape.x + \ (blockIdx.x * blockDim.x + fx); } __global__ void uf_global(const uint32* in, uint32* out, int3 shape) { int3 p; p.z = blockIdx.z * blockDim.z + threadIdx.z; p.y = blockIdx.y * blockDim.y + threadIdx.y; p.x = blockIdx.x * blockDim.x + threadIdx.x; long image_plane = (shape.y * shape.x); uint32 gidx = p.z * image_plane + p.y * shape.x + p.x; if ( p.z >= shape.z || p.y >= shape.y || p.x >= shape.x ) { return; } uint32 v = in[gidx]; if ( p.z > 0 && threadIdx.z == 0 && in[gidx - image_plane] == v ) { findAndUnion(out, gidx, gidx - image_plane); } if ( p.y > 0 && threadIdx.y == 0 && in[gidx - shape.x] == v ) { findAndUnion(out, gidx, gidx - shape.x); } if ( p.x > 0 && threadIdx.x == 0 && in[gidx - 1] == v ) { findAndUnion(out, gidx, gidx - 1); } } __global__ void uf_final(uint32* labels, int3 shape) { int3 p; p.z = blockIdx.z * blockDim.z + threadIdx.z; p.y = blockIdx.y * blockDim.y + threadIdx.y; p.x = blockIdx.x * blockDim.x + threadIdx.x; long gidx = p.z * shape.y * shape.x + p.y * shape.x + p.x; if ( p.z < shape.z && p.y < shape.y && p.x < shape.x ) { labels[gidx] = find(labels, gidx); } }
874b08ccf54cbc4a9dd1df9d07338074cb012027.cu
/* * Connected Components in the GPU * Paper Source: An Optimized Union-Find Algorithm for Connected * Components Labeling Using GPUs * Adapted from: https://github.com/victormatheus/CCL-GPU * Modified by: Imanol Luengo <[email protected]> */ typedef unsigned int uint32; #define MAX_UINT32 0xFFFFFFFF __device__ uint32 find(uint32* buf, uint32 x) { uint32 p = x; while ( x != buf[x] ) { x = buf[x]; } buf[p] = x; // fast linking return x; } __device__ void findAndUnion(uint32* buf, uint32 g1, uint32 g2) { bool done; uint32 old; do { g1 = find(buf, g1); g2 = find(buf, g2); if (g1 < g2) { old = atomicMin(&buf[g2], g1); done = (old == g2); g2 = old; } else if (g2 < g1) { old = atomicMin(&buf[g1], g2); done = (old == g1); g1 = old; } else { done = true; } } while ( !done ); } __global__ void uf_local(const uint32* in, uint32* out, int3 shape) { int3 p; p.z = blockIdx.z * blockDim.z + threadIdx.z; p.y = blockIdx.y * blockDim.y + threadIdx.y; p.x = blockIdx.x * blockDim.x + threadIdx.x; long image_plane = (shape.y * shape.x); long block_plane = (blockDim.y * blockDim.x); uint32 gidx = p.z * image_plane + p.y * shape.x + p.x; uint32 bidx = threadIdx.z * block_plane + \ threadIdx.y * blockDim.x + \ threadIdx.x; long bsize = blockDim.z * blockDim.y * blockDim.x; extern __shared__ uint32 s_buffer[]; bool in_limits = p.z < shape.z && p.y < shape.y && p.x < shape.x; s_buffer[bidx] = bidx; s_buffer[bsize + bidx] = in_limits? in[p.z * image_plane + p.y * shape.x + p.x] : 0; __syncthreads(); if ( !in_limits ) {return;} uint32 v = s_buffer[bsize + bidx]; if ( threadIdx.x > 0 && s_buffer[bsize + bidx - 1] == v ) { findAndUnion(s_buffer, bidx, bidx - 1); } __syncthreads(); if ( threadIdx.y > 0 && s_buffer[bsize + bidx - blockDim.x] == v ) { findAndUnion(s_buffer, bidx, bidx - blockDim.x); } __syncthreads(); if ( threadIdx.z > 0 && s_buffer[bsize + bidx - block_plane] == v ) { findAndUnion(s_buffer, bidx, bidx - block_plane); } __syncthreads(); uint32 f = find(s_buffer, bidx); uint32 aux = f % block_plane; uint32 fz = f / block_plane; uint32 fy = aux / blockDim.x; uint32 fx = aux % blockDim.x; out[gidx] = (blockIdx.z * blockDim.z + fz) * image_plane + \ (blockIdx.y * blockDim.y + fy) * shape.x + \ (blockIdx.x * blockDim.x + fx); } __global__ void uf_global(const uint32* in, uint32* out, int3 shape) { int3 p; p.z = blockIdx.z * blockDim.z + threadIdx.z; p.y = blockIdx.y * blockDim.y + threadIdx.y; p.x = blockIdx.x * blockDim.x + threadIdx.x; long image_plane = (shape.y * shape.x); uint32 gidx = p.z * image_plane + p.y * shape.x + p.x; if ( p.z >= shape.z || p.y >= shape.y || p.x >= shape.x ) { return; } uint32 v = in[gidx]; if ( p.z > 0 && threadIdx.z == 0 && in[gidx - image_plane] == v ) { findAndUnion(out, gidx, gidx - image_plane); } if ( p.y > 0 && threadIdx.y == 0 && in[gidx - shape.x] == v ) { findAndUnion(out, gidx, gidx - shape.x); } if ( p.x > 0 && threadIdx.x == 0 && in[gidx - 1] == v ) { findAndUnion(out, gidx, gidx - 1); } } __global__ void uf_final(uint32* labels, int3 shape) { int3 p; p.z = blockIdx.z * blockDim.z + threadIdx.z; p.y = blockIdx.y * blockDim.y + threadIdx.y; p.x = blockIdx.x * blockDim.x + threadIdx.x; long gidx = p.z * shape.y * shape.x + p.y * shape.x + p.x; if ( p.z < shape.z && p.y < shape.y && p.x < shape.x ) { labels[gidx] = find(labels, gidx); } }
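A host-side driver is not part of the file pair above; a hypothetical one would launch the three kernels in order, giving uf_local 2 * blockVolume * sizeof(uint32) bytes of dynamic shared memory to match how it indexes s_buffer. The block shape below is an assumption:

void connected_components(const uint32 *d_in, uint32 *d_labels, int3 shape)
{
    dim3 block(8, 8, 4);                               // assumed block shape
    dim3 grid((shape.x + block.x - 1) / block.x,
              (shape.y + block.y - 1) / block.y,
              (shape.z + block.z - 1) / block.z);
    size_t smem = 2 * block.x * block.y * block.z * sizeof(uint32);

    uf_local<<<grid, block, smem>>>(d_in, d_labels, shape);  // per-block labelling
    uf_global<<<grid, block>>>(d_in, d_labels, shape);       // merge labels across block borders
    uf_final<<<grid, block>>>(d_labels, shape);              // flatten every label to its root
}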
ff4b1561ed950b0f53bd322db4e7c6fa0e625b34.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> // Host input vectors. float *uva_a; float *uva_b; // Host output vector. float *uva_c; // Size of arrays. int n = 0; /* CUDA kernel. Each thread takes care of one element of c. */ __global__ void vecAdd(float *a, float *b, float *c, int n) { // Get our global thread ID int id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] + b[id]; } void init_array() { fprintf(stdout, "Inicializando os arrays.\n"); int i; // Initialize vectors on host. for (i = 0; i < n; i++) { uva_a[i] = sinf(i) * sinf(i); uva_b[i] = cosf(i) * cosf(i); } } void print_array() { int i; printf("Imprimindo o Resultado.\n"); for (i = 0; i < n; i++) { fprintf(stdout, "uva_c[%07d]: %f\n", i, uva_c[i]); } } void check_result(){ // Soma dos elementos do array C e divide por N, o valor deve ser igual a 1. int i; float sum = 0; fprintf(stdout, "Verificando o Resultado.\n"); for (i = 0; i < n; i++) { sum += uva_c[i]; } fprintf(stdout, "Resultado Final: (%f, %f)\n", sum, (float)(sum / (float)n)); } /* Main code */ int main(int argc, char *argv[]) { // Size of vectors n = atoi(argv[1]); printf("Número de Elementos: %d\n", n); // Size, in bytes, of each vector size_t bytes = n * sizeof(float); printf("Memória que será alocada para os 3 arrays: %d\n", 3 * bytes); printf("Allocate memory for each vector on host\n"); // Allocate memory for each vector on host hipMallocManaged(&uva_a, bytes); hipMallocManaged(&uva_b, bytes); hipMallocManaged(&uva_c, bytes); printf("Initialize vectors on host\n"); init_array(); // Number of threads in each thread block. int threadsPerBlock = 256; // Number of thread blocks in grid. int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock; printf("Execute the kernel\n"); hipEvent_t start_event, stop_event; float time_kernel_execution; int eventflags = hipEventBlockingSync; hipEventCreateWithFlags(&start_event, eventflags); hipEventCreateWithFlags(&stop_event, eventflags); /* Recording the time to kernel execution */ hipEventRecord(start_event, 0); /* Execute the kernel. */ hipLaunchKernelGGL(( vecAdd) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, uva_a, uva_b, uva_c, n); /* Synchronize */ hipDeviceSynchronize(); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&time_kernel_execution, start_event, stop_event); printf("Time Kernel Execution: %f s\n", (time_kernel_execution / 1000.0f)); print_array(); check_result(); printf("Time Kernel Execution: %f ms\n", (time_kernel_execution)); // Release device memory hipFree(uva_a); hipFree(uva_b); hipFree(uva_c); return 0; }
ff4b1561ed950b0f53bd322db4e7c6fa0e625b34.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> // Host input vectors. float *uva_a; float *uva_b; // Host output vector. float *uva_c; // Size of arrays. int n = 0; /* CUDA kernel. Each thread takes care of one element of c. */ __global__ void vecAdd(float *a, float *b, float *c, int n) { // Get our global thread ID int id = blockIdx.x * blockDim.x + threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] + b[id]; } void init_array() { fprintf(stdout, "Inicializando os arrays.\n"); int i; // Initialize vectors on host. for (i = 0; i < n; i++) { uva_a[i] = sinf(i) * sinf(i); uva_b[i] = cosf(i) * cosf(i); } } void print_array() { int i; printf("Imprimindo o Resultado.\n"); for (i = 0; i < n; i++) { fprintf(stdout, "uva_c[%07d]: %f\n", i, uva_c[i]); } } void check_result(){ // Soma dos elementos do array C e divide por N, o valor deve ser igual a 1. int i; float sum = 0; fprintf(stdout, "Verificando o Resultado.\n"); for (i = 0; i < n; i++) { sum += uva_c[i]; } fprintf(stdout, "Resultado Final: (%f, %f)\n", sum, (float)(sum / (float)n)); } /* Main code */ int main(int argc, char *argv[]) { // Size of vectors n = atoi(argv[1]); printf("Número de Elementos: %d\n", n); // Size, in bytes, of each vector size_t bytes = n * sizeof(float); printf("Memória que será alocada para os 3 arrays: %d\n", 3 * bytes); printf("Allocate memory for each vector on host\n"); // Allocate memory for each vector on host cudaMallocManaged(&uva_a, bytes); cudaMallocManaged(&uva_b, bytes); cudaMallocManaged(&uva_c, bytes); printf("Initialize vectors on host\n"); init_array(); // Number of threads in each thread block. int threadsPerBlock = 256; // Number of thread blocks in grid. int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock; printf("Execute the kernel\n"); cudaEvent_t start_event, stop_event; float time_kernel_execution; int eventflags = cudaEventBlockingSync; cudaEventCreateWithFlags(&start_event, eventflags); cudaEventCreateWithFlags(&stop_event, eventflags); /* Recording the time to kernel execution */ cudaEventRecord(start_event, 0); /* Execute the kernel. */ vecAdd <<< blocksPerGrid, threadsPerBlock >>> (uva_a, uva_b, uva_c, n); /* Synchronize */ cudaDeviceSynchronize(); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&time_kernel_execution, start_event, stop_event); printf("Time Kernel Execution: %f s\n", (time_kernel_execution / 1000.0f)); print_array(); check_result(); printf("Time Kernel Execution: %f ms\n", (time_kernel_execution)); // Release device memory cudaFree(uva_a); cudaFree(uva_b); cudaFree(uva_c); return 0; }
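With cudaMallocManaged/hipMallocManaged, the timed launch above also pays for on-demand page migration. On devices that support it, prefetching the managed buffers before cudaEventRecord(start_event, 0) keeps the measurement closer to pure compute; the helper below is a sketch, not part of the original program:

#include <cuda_runtime.h>

// Call before cudaEventRecord(start_event, 0) in main() above.
static void prefetch_to_gpu(float *a, float *b, float *c, size_t bytes)
{
    int device = 0;
    cudaGetDevice(&device);
    cudaMemPrefetchAsync(a, bytes, device, 0);         // migrate the managed pages ahead of the launch
    cudaMemPrefetchAsync(b, bytes, device, 0);
    cudaMemPrefetchAsync(c, bytes, device, 0);
    cudaDeviceSynchronize();                           // make sure migration finished before timing
}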
a845130ca8cacf134329dee01458ffdd2fed4fc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <CylinderCollision_Kernel.cuh> #include <double3.h> __global__ void collisionCylinder(int nbBodies, double3* newPos, double3* newVel, float radiusParticle, float dt, float elast, bool container, float3 center, float baseRadius, float l, float3 direction) { int indexP = blockIdx.x*blockDim.x + threadIdx.x; if(indexP < nbBodies){ double3 posAp = newPos[indexP]; double3 velAp = newVel[indexP]; double3 pInter = make_double3(posAp.x,posAp.y,posAp.z); double3 nInter = make_double3(0,0,0); bool collision = false; double3 P1 = make_double3(center.x + direction.x*(l/2),center.y + direction.y*(l/2),center.z + direction.z*(l/2)); double3 P2 = make_double3(center.x - direction.x*(l/2),center.y - direction.y*(l/2),center.z - direction.z*(l/2)); double3 C = make_double3(center.x + posAp.x*direction.x, center.y + posAp.y*direction.y,center.z + posAp.z*direction.x); double dist = sqrt(powf(center.x-C.x,2)+powf(center.y-C.y,2)+powf(center.z-C.z,2)); if(dist>=(l/2)){ double dist1 = sqrt(powf(P1.x-C.x,2)+powf(P1.y-C.y,2)+powf(P1.z-C.z,2)); double dist2 = sqrt(powf(P2.x-C.x,2)+powf(P2.y-C.y,2)+powf(P2.z-C.z,2)); collision = true; if(dist1>dist2){ double3 dir = make_double3(C.x-posAp.x,C.y-posAp.y,C.z-posAp.z); double lDir = sqrt(dir.x*dir.x + dir.y*dir.y + dir.z*dir.z); dir.x = dir.x/lDir; dir.y = dir.y/lDir; dir.z = dir.z/lDir; if(direction.x!=0) pInter.x = P2.x; if(direction.y!=0) pInter.y = P2.y; if(direction.z!=0) pInter.z = P2.z; if(lDir>=baseRadius){ if(direction.x==0) pInter.x = C.x - (baseRadius-0.04)*dir.x; if(direction.y==0) pInter.y = C.y - (baseRadius-0.04)*dir.y; if(direction.z==0) pInter.z = C.z - (baseRadius-0.04)*dir.z; } nInter = make_double3(direction.x,direction.y,direction.z); double lnInter = sqrt(powf(nInter.x,2)+powf(nInter.y,2)+powf(nInter.z,2)); nInter.x = nInter.x/lnInter; nInter.y = nInter.y/lnInter; nInter.z = nInter.z/lnInter; } else { double3 dir = make_double3(C.x-posAp.x,C.y-posAp.y,C.z-posAp.z); double lDir = sqrt(dir.x*dir.x + dir.y*dir.y + dir.z*dir.z); dir.x = -dir.x/lDir; dir.y = -dir.y/lDir; dir.z = -dir.z/lDir; if(direction.x!=0) pInter.x = P1.x; if(direction.y!=0) pInter.y = P1.y; if(direction.z!=0) pInter.z = P1.z; if(lDir>=baseRadius){ if(direction.x==0) pInter.x = C.x + (baseRadius-0.04)*dir.x; if(direction.y==0) pInter.y = C.y + (baseRadius-0.04)*dir.y; if(direction.z==0) pInter.z = C.z + (baseRadius-0.04)*dir.z; } nInter = make_double3(-direction.x,-direction.y,-direction.z); double lnInter = sqrt(powf(nInter.x,2)+powf(nInter.y,2)+powf(nInter.z,2)); nInter.x = nInter.x/lnInter; nInter.y = nInter.y/lnInter; nInter.z = nInter.z/lnInter; } } else { double dist1 = sqrt(powf(C.x-posAp.x,2)+powf(C.y-posAp.y,2)+powf(C.z-posAp.z,2)); if(dist1>=baseRadius){ collision = true; double3 dir = make_double3(C.x-posAp.x,C.y-posAp.y,C.z-posAp.z); double lDir = sqrt(dir.x*dir.x + dir.y*dir.y + dir.z*dir.z); dir.x = -dir.x/lDir; dir.y = -dir.y/lDir; dir.z = -dir.z/lDir; pInter.x = C.x + (baseRadius-0.04)*dir.x*(1-direction.x); pInter.y = C.y + (baseRadius-0.04)*dir.y*(1-direction.y); pInter.z = C.z + (baseRadius-0.04)*dir.z*(1-direction.z); nInter = dir; } } if(collision==true){ newPos[indexP] = make_double3(pInter.x,pInter.y,pInter.z); float r = 0; double lV = sqrt(velAp.x*velAp.x+velAp.y*velAp.y+velAp.z*velAp.z); elast = 0.3; if(lV>0 && elast>0 && dt>0){ double3 PI = make_double3(posAp.x-pInter.x,posAp.y-pInter.y,posAp.z-pInter.z); double lPI = sqrt(PI.x*PI.x + PI.y*PI.y + PI.z*PI.z); r 
= elast*lPI/(dt*lV); } double3 V; V.x = velAp.x - (1+r)*dot(velAp,nInter)*nInter.x; V.y = velAp.y - (1+r)*dot(velAp,nInter)*nInter.y; V.z = velAp.z - (1+r)*dot(velAp,nInter)*nInter.z; newVel[indexP] = make_double3(V.x,V.y,V.z); } } }
a845130ca8cacf134329dee01458ffdd2fed4fc9.cu
#include <CylinderCollision_Kernel.cuh> #include <double3.h> __global__ void collisionCylinder(int nbBodies, double3* newPos, double3* newVel, float radiusParticle, float dt, float elast, bool container, float3 center, float baseRadius, float l, float3 direction) { int indexP = blockIdx.x*blockDim.x + threadIdx.x; if(indexP < nbBodies){ double3 posAp = newPos[indexP]; double3 velAp = newVel[indexP]; double3 pInter = make_double3(posAp.x,posAp.y,posAp.z); double3 nInter = make_double3(0,0,0); bool collision = false; double3 P1 = make_double3(center.x + direction.x*(l/2),center.y + direction.y*(l/2),center.z + direction.z*(l/2)); double3 P2 = make_double3(center.x - direction.x*(l/2),center.y - direction.y*(l/2),center.z - direction.z*(l/2)); double3 C = make_double3(center.x + posAp.x*direction.x, center.y + posAp.y*direction.y,center.z + posAp.z*direction.x); double dist = sqrt(powf(center.x-C.x,2)+powf(center.y-C.y,2)+powf(center.z-C.z,2)); if(dist>=(l/2)){ double dist1 = sqrt(powf(P1.x-C.x,2)+powf(P1.y-C.y,2)+powf(P1.z-C.z,2)); double dist2 = sqrt(powf(P2.x-C.x,2)+powf(P2.y-C.y,2)+powf(P2.z-C.z,2)); collision = true; if(dist1>dist2){ double3 dir = make_double3(C.x-posAp.x,C.y-posAp.y,C.z-posAp.z); double lDir = sqrt(dir.x*dir.x + dir.y*dir.y + dir.z*dir.z); dir.x = dir.x/lDir; dir.y = dir.y/lDir; dir.z = dir.z/lDir; if(direction.x!=0) pInter.x = P2.x; if(direction.y!=0) pInter.y = P2.y; if(direction.z!=0) pInter.z = P2.z; if(lDir>=baseRadius){ if(direction.x==0) pInter.x = C.x - (baseRadius-0.04)*dir.x; if(direction.y==0) pInter.y = C.y - (baseRadius-0.04)*dir.y; if(direction.z==0) pInter.z = C.z - (baseRadius-0.04)*dir.z; } nInter = make_double3(direction.x,direction.y,direction.z); double lnInter = sqrt(powf(nInter.x,2)+powf(nInter.y,2)+powf(nInter.z,2)); nInter.x = nInter.x/lnInter; nInter.y = nInter.y/lnInter; nInter.z = nInter.z/lnInter; } else { double3 dir = make_double3(C.x-posAp.x,C.y-posAp.y,C.z-posAp.z); double lDir = sqrt(dir.x*dir.x + dir.y*dir.y + dir.z*dir.z); dir.x = -dir.x/lDir; dir.y = -dir.y/lDir; dir.z = -dir.z/lDir; if(direction.x!=0) pInter.x = P1.x; if(direction.y!=0) pInter.y = P1.y; if(direction.z!=0) pInter.z = P1.z; if(lDir>=baseRadius){ if(direction.x==0) pInter.x = C.x + (baseRadius-0.04)*dir.x; if(direction.y==0) pInter.y = C.y + (baseRadius-0.04)*dir.y; if(direction.z==0) pInter.z = C.z + (baseRadius-0.04)*dir.z; } nInter = make_double3(-direction.x,-direction.y,-direction.z); double lnInter = sqrt(powf(nInter.x,2)+powf(nInter.y,2)+powf(nInter.z,2)); nInter.x = nInter.x/lnInter; nInter.y = nInter.y/lnInter; nInter.z = nInter.z/lnInter; } } else { double dist1 = sqrt(powf(C.x-posAp.x,2)+powf(C.y-posAp.y,2)+powf(C.z-posAp.z,2)); if(dist1>=baseRadius){ collision = true; double3 dir = make_double3(C.x-posAp.x,C.y-posAp.y,C.z-posAp.z); double lDir = sqrt(dir.x*dir.x + dir.y*dir.y + dir.z*dir.z); dir.x = -dir.x/lDir; dir.y = -dir.y/lDir; dir.z = -dir.z/lDir; pInter.x = C.x + (baseRadius-0.04)*dir.x*(1-direction.x); pInter.y = C.y + (baseRadius-0.04)*dir.y*(1-direction.y); pInter.z = C.z + (baseRadius-0.04)*dir.z*(1-direction.z); nInter = dir; } } if(collision==true){ newPos[indexP] = make_double3(pInter.x,pInter.y,pInter.z); float r = 0; double lV = sqrt(velAp.x*velAp.x+velAp.y*velAp.y+velAp.z*velAp.z); elast = 0.3; if(lV>0 && elast>0 && dt>0){ double3 PI = make_double3(posAp.x-pInter.x,posAp.y-pInter.y,posAp.z-pInter.z); double lPI = sqrt(PI.x*PI.x + PI.y*PI.y + PI.z*PI.z); r = elast*lPI/(dt*lV); } double3 V; V.x = velAp.x - (1+r)*dot(velAp,nInter)*nInter.x; V.y 
= velAp.y - (1+r)*dot(velAp,nInter)*nInter.y; V.z = velAp.z - (1+r)*dot(velAp,nInter)*nInter.z; newVel[indexP] = make_double3(V.x,V.y,V.z); } } }
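The velocity update at the end of the kernel above is the damped reflection v' = v - (1 + r)(v.n)n with r = elast*|PI|/(dt*|v|). A small helper expressing just that step (a sketch; dot() for double3 is assumed to come from the included double3.h, since the kernel already uses it):

__device__ double3 reflect_velocity(double3 v, double3 n, double r)
{
    double vn = dot(v, n);                             // component of v along the contact normal
    return make_double3(v.x - (1.0 + r) * vn * n.x,
                        v.y - (1.0 + r) * vn * n.y,
                        v.z - (1.0 + r) * vn * n.z);
}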
bffaa9521b0e7e8727f70f0a79eee86ebcbb2462.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> using namespace std; void createMatrix(int* myMat,int row,int col) { for(int i=0;i<row;i++) { for(int j=0;j<col;j++) { myMat[i*col+j] = rand()%10; } } } void printMatrix(int* myMat,int row,int col) { for(int i=0;i<row;i++) { for(int j=0;j<col;j++) { cout<<myMat[i*col+j]<<" "; } cout<<endl; } } __global__ void multiplyMatrix(int* matA,int* matB,int* resultMat,int rowA,int rowB,int colB) { int i = threadIdx.x; for(int j=0;j<colB;j++) { int sum = 0; for(int k=0;k<rowB;k++) { sum+=(matA[i*rowB+k]*matB[k*colB+j]); } resultMat[i*colB+j] = sum; } } void multiplyMatrixSerial(int* matA, int* matB, int* resultMat, int rowA, int rowB, int colB) { for (int i = 0; i<rowA; i++) { for (int j = 0; j<colB; j++) { int sum = 0; for (int k = 0; k<rowB; k++) { sum += (matA[i*rowB + k] * matB[k*colB + j]); } resultMat[i*colB + j] = sum; } } } int main() { int rowA = 3; int rowB = 4; int colB = 3; //allocate memory in host int *matA = new int[rowA*rowB*sizeof(int)]; int *matB = new int[rowB*colB*sizeof(int)]; int *matC = new int[rowA*colB*sizeof(int)]; //allocate memory in device int *dA, *dB, *dC; hipMalloc((void**)&dA,rowA*rowB*sizeof(int)); hipMalloc((void**)&dB,rowB*colB*sizeof(int)); hipMalloc((void**)&dC,rowA*colB*sizeof(int)); cout<<"Creating matrix..."<<endl; createMatrix(matA,rowA,rowB); createMatrix(matB,rowB,colB); cout<<"Creating matrix completed"<<endl; //copy from host to device hipMemcpy(dA,matA,rowA*rowB*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dB,matB,rowB*colB*sizeof(int),hipMemcpyHostToDevice); cout<<"MatrixA: "<<endl; printMatrix(matA,rowA,rowB); cout<<"MatrixB: "<<endl; printMatrix(matB,rowB,colB); //each thread will compute a row of elements in result matrix hipLaunchKernelGGL(( multiplyMatrix) , dim3(1),dim3(rowA), 0, 0, dA,dB,dC,rowA,rowB,colB); //copy result from device to host hipMemcpy(matC,dC,rowA*colB*sizeof(int),hipMemcpyDeviceToHost); cout<<"The parallel result matrix is: "<<endl; printMatrix(matC,rowA,colB); multiplyMatrixSerial(matA,matB,matC,rowA,rowB,colB); cout << "The serial result matrix is: " << endl; printMatrix(matC, rowA, colB); hipFree(dA); hipFree(dB); hipFree(dC); delete[] matA; delete[] matB; delete[] matC; return 0; }
bffaa9521b0e7e8727f70f0a79eee86ebcbb2462.cu
#include <iostream> #include <cstdlib> using namespace std; void createMatrix(int* myMat,int row,int col) { for(int i=0;i<row;i++) { for(int j=0;j<col;j++) { myMat[i*col+j] = rand()%10; } } } void printMatrix(int* myMat,int row,int col) { for(int i=0;i<row;i++) { for(int j=0;j<col;j++) { cout<<myMat[i*col+j]<<" "; } cout<<endl; } } __global__ void multiplyMatrix(int* matA,int* matB,int* resultMat,int rowA,int rowB,int colB) { int i = threadIdx.x; for(int j=0;j<colB;j++) { int sum = 0; for(int k=0;k<rowB;k++) { sum+=(matA[i*rowB+k]*matB[k*colB+j]); } resultMat[i*colB+j] = sum; } } void multiplyMatrixSerial(int* matA, int* matB, int* resultMat, int rowA, int rowB, int colB) { for (int i = 0; i<rowA; i++) { for (int j = 0; j<colB; j++) { int sum = 0; for (int k = 0; k<rowB; k++) { sum += (matA[i*rowB + k] * matB[k*colB + j]); } resultMat[i*colB + j] = sum; } } } int main() { int rowA = 3; int rowB = 4; int colB = 3; //allocate memory in host int *matA = new int[rowA*rowB*sizeof(int)]; int *matB = new int[rowB*colB*sizeof(int)]; int *matC = new int[rowA*colB*sizeof(int)]; //allocate memory in device int *dA, *dB, *dC; cudaMalloc((void**)&dA,rowA*rowB*sizeof(int)); cudaMalloc((void**)&dB,rowB*colB*sizeof(int)); cudaMalloc((void**)&dC,rowA*colB*sizeof(int)); cout<<"Creating matrix..."<<endl; createMatrix(matA,rowA,rowB); createMatrix(matB,rowB,colB); cout<<"Creating matrix completed"<<endl; //copy from host to device cudaMemcpy(dA,matA,rowA*rowB*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dB,matB,rowB*colB*sizeof(int),cudaMemcpyHostToDevice); cout<<"MatrixA: "<<endl; printMatrix(matA,rowA,rowB); cout<<"MatrixB: "<<endl; printMatrix(matB,rowB,colB); //each thread will compute a row of elements in result matrix multiplyMatrix <<<1,rowA>>> (dA,dB,dC,rowA,rowB,colB); //copy result from device to host cudaMemcpy(matC,dC,rowA*colB*sizeof(int),cudaMemcpyDeviceToHost); cout<<"The parallel result matrix is: "<<endl; printMatrix(matC,rowA,colB); multiplyMatrixSerial(matA,matB,matC,rowA,rowB,colB); cout << "The serial result matrix is: " << endl; printMatrix(matC, rowA, colB); cudaFree(dA); cudaFree(dB); cudaFree(dC); delete[] matA; delete[] matB; delete[] matC; return 0; }
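The launch above uses a single block with one thread per row, so rowA is capped by the 1024-threads-per-block limit, and the host buffers are over-allocated (new int[rows*cols] already accounts for sizeof(int)). Below is a bounds-checked variant of the same row-per-thread kernel that scales across blocks; the name multiplyMatrixRows is an addition:

__global__ void multiplyMatrixRows(const int *matA, const int *matB, int *resultMat,
                                   int rowA, int rowB, int colB)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;     // one thread per output row
    if (i >= rowA) return;                             // guard the padded last block
    for (int j = 0; j < colB; j++) {
        int sum = 0;
        for (int k = 0; k < rowB; k++)
            sum += matA[i * rowB + k] * matB[k * colB + j];
        resultMat[i * colB + j] = sum;
    }
}

// usage: multiplyMatrixRows<<<(rowA + 255) / 256, 256>>>(dA, dB, dC, rowA, rowB, colB);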
e0cf72675af647b88de41fe6b3d36eec31fe191e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Vector addition on the GPU: C = A + B */ #include <stdio.h> #include <stdlib.h> #define SIZE 1000000 #define BLOCKSIZE 32 // Device function (i.e. kernel) __global__ void VecAdd(float * A, float * B, float * C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if ( i < N ) { C[i] = A[i] + B[i]; } } // CPU version of the vector addition function void vecAddCPU(float * A, float * B, float * C, int N) { int i; for (i=0; i<N; i++) { C[i] = A[i] + B[i]; } } // Function compares two 1d arrays void compareVecs( float * vec1, float * vec2, int N ) { int i; int vecsEqual = 1; for (i=0; i<N; i++) { if ( abs (vec1[i] - vec2[i]) > 0.00001 ) { printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]); vecsEqual = 0; } } if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n"); } /* Host function for filling vector (1d array) with random numbers between -20.0 and 20.0 */ void fillOutVector( float * vec, int vec_length ) { time_t t; srand((unsigned) time(&t)); // initialize random number generator int i; for (i=0; i<vec_length; i++) { vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0; vec[i] -= 20.0; } } // Host function for printing a vector (1d array) void printVector( float * vec, int vec_length ) { int i; for (i=0; i<vec_length; i++) { printf("i: %d vec[i]: %f\n",i,vec[i]); } } // program execution begins here int main( int argc, char ** argv ) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); size_t vec_bytes = SIZE * sizeof(float); // host arrays float * h_A = (float *)malloc( vec_bytes ); float * h_B = (float *)malloc( vec_bytes ); float * h_C = (float *)malloc( vec_bytes ); // fill array with random floats fillOutVector( h_A, SIZE ); fillOutVector( h_B, SIZE ); // device arrays float * d_A, * d_B, * d_C; hipError_t rc; // return code from cuda functions rc = hipMalloc(&d_A, vec_bytes); if ( rc ) printf("Error from hipMalloc: %s\n",hipGetErrorString(rc)); hipMalloc(&d_B, vec_bytes); hipMalloc(&d_C, vec_bytes); // copy A and B to the device hipMemcpy(d_A, h_A, vec_bytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, vec_bytes, hipMemcpyHostToDevice); // dim3 is a 3-element struct with elements x, y, z (all ints) dim3 threadsPerBlock(BLOCKSIZE); dim3 blocksPerGrid( (SIZE + BLOCKSIZE - 1) / BLOCKSIZE ); // launch vector addition kernel! hipEventRecord(start); hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, d_C, SIZE); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("kernel time (ms) : %7.5f\n",milliseconds); // copy results to host hipMemcpy(h_C, d_C, vec_bytes, hipMemcpyDeviceToHost); //printVector( h_C, SIZE ); // verify that we got correct results float * gold_C = (float *)malloc( vec_bytes ); hipEventRecord(start); vecAddCPU( h_A, h_B, gold_C, SIZE ); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("cpu function time (ms) : %7.5f\n",milliseconds); compareVecs( gold_C, h_C, SIZE ); // clean up timer variables hipEventDestroy(start); hipEventDestroy(stop); // free memory on device hipFree(d_A); hipFree(d_B); hipFree(d_C); // free memory on host free(h_A); free(h_B); free(h_C); free(gold_C); return 0; }
e0cf72675af647b88de41fe6b3d36eec31fe191e.cu
/* Vector addition on the GPU: C = A + B */ #include <stdio.h> #include <stdlib.h> #define SIZE 1000000 #define BLOCKSIZE 32 // Device function (i.e. kernel) __global__ void VecAdd(float * A, float * B, float * C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if ( i < N ) { C[i] = A[i] + B[i]; } } // CPU version of the vector addition function void vecAddCPU(float * A, float * B, float * C, int N) { int i; for (i=0; i<N; i++) { C[i] = A[i] + B[i]; } } // Function compares two 1d arrays void compareVecs( float * vec1, float * vec2, int N ) { int i; int vecsEqual = 1; for (i=0; i<N; i++) { if ( abs (vec1[i] - vec2[i]) > 0.00001 ) { printf("vectors not equal! i: %d vec1[i]: %f vec2[i]: %f\n",i,vec1[i],vec2[i]); vecsEqual = 0; } } if ( vecsEqual ) printf("GPU vector addition agrees with CPU version!\n"); } /* Host function for filling vector (1d array) with random numbers between -20.0 and 20.0 */ void fillOutVector( float * vec, int vec_length ) { time_t t; srand((unsigned) time(&t)); // initialize random number generator int i; for (i=0; i<vec_length; i++) { vec[i] = ( (float)rand() / (float)(RAND_MAX) ) * 40.0; vec[i] -= 20.0; } } // Host function for printing a vector (1d array) void printVector( float * vec, int vec_length ) { int i; for (i=0; i<vec_length; i++) { printf("i: %d vec[i]: %f\n",i,vec[i]); } } // program execution begins here int main( int argc, char ** argv ) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); size_t vec_bytes = SIZE * sizeof(float); // host arrays float * h_A = (float *)malloc( vec_bytes ); float * h_B = (float *)malloc( vec_bytes ); float * h_C = (float *)malloc( vec_bytes ); // fill array with random floats fillOutVector( h_A, SIZE ); fillOutVector( h_B, SIZE ); // device arrays float * d_A, * d_B, * d_C; cudaError_t rc; // return code from cuda functions rc = cudaMalloc(&d_A, vec_bytes); if ( rc ) printf("Error from cudaMalloc: %s\n",cudaGetErrorString(rc)); cudaMalloc(&d_B, vec_bytes); cudaMalloc(&d_C, vec_bytes); // copy A and B to the device cudaMemcpy(d_A, h_A, vec_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, vec_bytes, cudaMemcpyHostToDevice); // dim3 is a 3-element struct with elements x, y, z (all ints) dim3 threadsPerBlock(BLOCKSIZE); dim3 blocksPerGrid( (SIZE + BLOCKSIZE - 1) / BLOCKSIZE ); // launch vector addition kernel! cudaEventRecord(start); VecAdd<<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, d_C, SIZE); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("kernel time (ms) : %7.5f\n",milliseconds); // copy results to host cudaMemcpy(h_C, d_C, vec_bytes, cudaMemcpyDeviceToHost); //printVector( h_C, SIZE ); // verify that we got correct results float * gold_C = (float *)malloc( vec_bytes ); cudaEventRecord(start); vecAddCPU( h_A, h_B, gold_C, SIZE ); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("cpu function time (ms) : %7.5f\n",milliseconds); compareVecs( gold_C, h_C, SIZE ); // clean up timer variables cudaEventDestroy(start); cudaEventDestroy(stop); // free memory on device cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // free memory on host free(h_A); free(h_B); free(h_C); free(gold_C); return 0; }
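Only the first cudaMalloc in the program above is checked. A small wrapper macro, a common pattern rather than anything taken from this file, makes it cheap to check every runtime call:

#include <stdio.h>
#include <stdlib.h>

#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// usage: CUDA_CHECK(cudaMalloc(&d_A, vec_bytes));
//        CUDA_CHECK(cudaMemcpy(d_A, h_A, vec_bytes, cudaMemcpyHostToDevice));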
ad120ed9f051c0034e306946618d90621bc92552.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from zgeellrtmv.cu normal z -> c, Sun May 3 11:22:58 2015 */ #include "common_magma.h" //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void cgeellrtmv_kernel_32( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void cgeellrtmv_kernel_16( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M.
Garzón, Almeria University __global__ void cgeellrtmv_kernel_8( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows @param[in] n magma_int_t number of columns @param[in] nnz_per_row magma_int_t max number of nonzeros in a row @param[in] alpha magmaFloatComplex scalar alpha @param[in] dval magmaFloatComplex_ptr val array @param[in] dcolind magmaIndex_ptr col indices @param[in] drowlength magmaIndex_ptr number of elements in each row @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar beta @param[out] dy magmaFloatComplex_ptr output vector y @param[in] blocksize magma_int_t threads per block @param[in] alignment magma_int_t threads assigned to each row @param[in] queue magma_queue_t Queue to execute in.
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t alignment, magma_int_t blocksize, magma_queue_t queue ) { int num_blocks = magma_ceildiv( m, blocksize ); magma_int_t num_threads = alignment*blocksize; magma_int_t threads = alignment*blocksize; int real_row_length = magma_roundup( nnz_per_row, alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = (int) sqrt( (float) num_blocks ); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( alignment == 32 ) { hipLaunchKernelGGL(( cgeellrtmv_kernel_32), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 16 ) { hipLaunchKernelGGL(( cgeellrtmv_kernel_16), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 8 ) { hipLaunchKernelGGL(( cgeellrtmv_kernel_8), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } return MAGMA_SUCCESS; }
ad120ed9f051c0034e306946618d90621bc92552.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from zgeellrtmv.cu normal z -> c, Sun May 3 11:22:58 2015 */ #include "common_magma.h" //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void cgeellrtmv_kernel_32( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void cgeellrtmv_kernel_16( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. 
Garzón, Almeria University __global__ void cgeellrtmv_kernel_8( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = magma_ceildiv( drowlength[i], T ); // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows @param[in] n magma_int_t number of columns @param[in] nnz_per_row magma_int_t max number of nonzeros in a row @param[in] alpha magmaFloatComplex scalar alpha @param[in] dval magmaFloatComplex_ptr val array @param[in] dcolind magmaIndex_ptr col indices @param[in] drowlength magmaIndex_ptr number of elements in each row @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar beta @param[out] dy magmaFloatComplex_ptr output vector y @param[in] blocksize magma_int_t threads per block @param[in] alignment magma_int_t threads assigned to each row @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t alignment, magma_int_t blocksize, magma_queue_t queue ) { int num_blocks = magma_ceildiv( m, blocksize ); magma_int_t num_threads = alignment*blocksize; magma_int_t threads = alignment*blocksize; int real_row_length = magma_roundup( nnz_per_row, alignment ); magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = (int) sqrt( (float) num_blocks ); int dimgrid2 = magma_ceildiv( num_blocks, dimgrid1 ); dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( alignment == 32 ) { cgeellrtmv_kernel_32<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 16 ) { cgeellrtmv_kernel_16<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 8 ) { cgeellrtmv_kernel_8<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } return MAGMA_SUCCESS; }
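/*
   Illustrative host-side reference (not from the MAGMA sources above): a plain
   CPU SpMV for the padded ELLRT layout that the modified kernel indexing reads,
   i.e. row i owns real_row_length = roundup(nnz_per_row, alignment) consecutive
   slots of dval/dcolind. Handy for checking kernel output. The function name
   ellrt_spmv_ref and the use of std::complex<float> in place of
   magmaFloatComplex are assumptions made for this sketch.
*/
#include <complex>

typedef std::complex<float> cplx;   // stand-in for magmaFloatComplex

// y = alpha * A * x + beta * y, with A stored row-major in padded ELLRT slots
void ellrt_spmv_ref(int m, int real_row_length,
                    const cplx *val, const int *colind, const int *rowlength,
                    cplx alpha, const cplx *x, cplx beta, cplx *y)
{
    for (int i = 0; i < m; ++i) {
        cplx dot(0.0f, 0.0f);
        for (int j = 0; j < rowlength[i]; ++j) {       // only the stored nonzeros of row i
            cplx v = val[i * real_row_length + j];     // same address the GPU threads read
            int  c = colind[i * real_row_length + j];
            dot += v * x[c];
        }
        y[i] = alpha * dot + beta * y[i];
    }
}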
0ab723491dcc87688cdbcc2f680f6c5fdddb25ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void addVectors( int size, float *d_a, float *d_b, float *d_c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < size) { d_c[tid] = d_a[tid] + d_b[tid]; } }
0ab723491dcc87688cdbcc2f680f6c5fdddb25ca.cu
#include "includes.h" __global__ void addVectors( int size, float *d_a, float *d_b, float *d_c) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < size) { d_c[tid] = d_a[tid] + d_b[tid]; } }
10d7de9a6318e0965cf6f4a3cb6835d9dd6e92ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* G Constant. Copyright (C) 2011 Edgard Nikitiuk <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _GET_GCONST_ #define _GET_GCONST_ #include "gsa.h" __global__ void get_gconst( int i, int max_i, float alpha, float g0, float *g ) { *g = ( g0 * exp(-alpha * i/max_i) ); } #endif
10d7de9a6318e0965cf6f4a3cb6835d9dd6e92ae.cu
/* G Constant. Copyright (C) 2011 Edgard Nikitiuk <[email protected]> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _GET_GCONST_ #define _GET_GCONST_ #include "gsa.h" __global__ void get_gconst( int i, int max_i, float alpha, float g0, float *g ) { *g = ( g0 * exp(-alpha * i/max_i) ); } #endif
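/*
   Illustrative usage (not from the original file): get_gconst produces a single
   scalar on the device, so a one-thread launch followed by a copy back is the
   natural way to call it. The sample values for i, max_i, alpha and g0 are
   arbitrary.
*/
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void get_gconst(int i, int max_i, float alpha, float g0, float *g);  // defined above

void gconst_demo(void)
{
    float *d_g = NULL;
    float h_g = 0.0f;
    cudaMalloc(&d_g, sizeof(float));

    get_gconst<<<1, 1>>>(10, 100, 20.0f, 100.0f, d_g);   // iteration 10 of 100
    cudaMemcpy(&h_g, d_g, sizeof(float), cudaMemcpyDeviceToHost);

    printf("G = %f\n", h_g);   // g0 * exp(-alpha * i / max_i)
    cudaFree(d_g);
}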
e48c9fe88dde7dc269c0fcee2172c0db5e656c16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "MyInc.h" #include "main.h" int main(int argc, char ** argv) { int nbtest = 0 ; int SizeA = 800 ; // Vertical ~ Y int SizeB = 200 ; // Horizontal ~ x int nbthread = 512 ; int maxTest = 2 * (int) RemplirEnd ; hipError_t errCuda ; // Device info getInfoCuda() ; // Hello enum EnumMerge AlgoMerge = TriMergePath_1024_shared ; // Choose the algorithm we want to try // Reminder: //TriMergeSimpleHOST : sequential, algorithm A //TriMergePathHOST : sequential, algorithm B //TriMergePathGPU_1024 : question 1, global memory //TriMergePath_1024_shared : question 1, shared memory //TriWindowsGPU_Para : question 2 // Questions 3 and 5 are in separate files // A few computations, even if they are not used by the target algorithm int NbDiagonale = (SizeA + SizeB) / nbthread ; int NbWindows = NbDiagonale ; // Number of merges to do: the number of diagonals + 1, unless the last diagonal lands on // the bottom-right corner NbWindows += (((SizeA + SizeB) % nbthread) == 0)?0:1 ; // Dynamic allocation on the host allocVecteur(SizeA, SizeB, nbthread) ; // Check that the device and the dimensions are compatible switch(AlgoMerge) { case TriMergePathGPU_1024: if ((SizeA + SizeB) > 1024) { printf("TriMergePathGPU_1024 SizeA + SizeB %d > 1024\n",SizeA + SizeB) ; exit(0) ; } break ; case TriMergePath_1024_shared: if ((SizeA + SizeB) > 1024) { printf("TriMergePath_1024_shared SizeA + SizeB %d > 1024\n",SizeA + SizeB) ; exit(0) ; } break ; } // For each available fill pattern for (int e = 0 ; e < (int)RemplirEnd ; e++) { nbtest ++ ; printf("CardA %d - CardB %d - NBThread %d - Algo de remplissage %s - %s\n", SizeA, SizeB, nbthread, MSGRemplir[e],MSGMerge[AlgoMerge]) ; initVecteur(HostVecteurA, SizeA, HostVecteurB, SizeB, HostVecteurC, (enum EnumTypeRemplissage) e, 1925) ; if (hipSuccess != (errCuda = hipMemcpy(CudaVecteurA, HostVecteurA, SizeA * sizeof(TYPE), hipMemcpyHostToDevice))) { printf("PB copie HostA -> cudaA - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } if (hipSuccess != (errCuda = hipMemcpy(CudaVecteurB, HostVecteurB, SizeB * sizeof(TYPE), hipMemcpyHostToDevice))) { printf("PB copie HostB -> cudaB - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } // The result is the same regardless of the order in which the merge is done. 
MergeSimpleHOST(HostVecteurA, HostVecteurB, HostVecteurD, SizeA, SizeB) ; switch(AlgoMerge) { case TriMergeSimpleHOST: MergeSimpleHOST(HostVecteurA, HostVecteurB, HostVecteurC, SizeA, SizeB) ; // 1 thread for 1 grid break ; case TriMergePathHOST: MergePathHOST(HostVecteurA, HostVecteurB, HostVecteurC, SizeA, SizeB); break ; case TriMergePathGPU_1024: hipLaunchKernelGGL(( MergePathGPU_1024), dim3(1),dim3(SizeA+SizeB), 0, 0, CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB) ; if (hipSuccess != (errCuda = hipMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), hipMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriMergePath_1024_shared: hipLaunchKernelGGL(( MergePathGPU_1024_shared), dim3(1),dim3(SizeA+SizeB), (SizeA+SizeB) * sizeof (TYPE), 0, CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB) ; if (hipSuccess != (errCuda = hipMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), hipMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriWindowsGPU_Para: MergeWindowsGPU(CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB , CudaDiagAy , CudaDiagBx , HostDiagAy, HostDiagBx , HostVecteurA , HostVecteurB, HostVecteurC, nbthread, NbDiagonale, NbWindows) ; if (hipSuccess != (errCuda = hipMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), hipMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; } printf("Verif A versus B %d / %d \n",nbtest, maxTest) ; if (compare(HostVecteurD, HostVecteurC, SizeA + SizeB) != 0) { printf("Errorr in %d / %d \n",nbtest, maxTest) ; printf("Vect En erreur\n") ; // Affiche((char *)"VectC", HostVecteurC, SizeA+SizeB) ; exit(0) ; } nbtest ++ ; // Merge A and B a second time (swapped), to verify that it works in both directions printf("Card First %d - Card Second %d - NBThread %d - Algo de remplissage %s - %s\n", SizeB, SizeA, nbthread, MSGRemplir[e],MSGMerge[AlgoMerge]) ; switch(AlgoMerge) { case TriMergeSimpleHOST: MergeSimpleHOST(HostVecteurB, HostVecteurA, HostVecteurC, SizeB, SizeA) ; // 1 thread for 1 grid break ; case TriMergePathHOST: MergePathHOST(HostVecteurB, HostVecteurA, HostVecteurC, SizeB, SizeA); break ; case TriMergePathGPU_1024: hipLaunchKernelGGL(( MergePathGPU_1024), dim3(1),dim3(SizeA+SizeB), 0, 0, CudaVecteurB, CudaVecteurA, CudaVecteurC, SizeB, SizeA); if (hipSuccess != (errCuda = hipMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), hipMemcpyDeviceToHost))) { printf("PB copie 2 cuda C -> host C - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriMergePath_1024_shared: hipLaunchKernelGGL(( MergePathGPU_1024_shared), dim3(1),dim3(SizeA+SizeB), (SizeA+SizeB) * sizeof (TYPE), 0, CudaVecteurB, CudaVecteurA, CudaVecteurC, SizeB, SizeA) ; if (hipSuccess != (errCuda = hipMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), hipMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriWindowsGPU_Para: MergeWindowsGPU(CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB , CudaDiagAy , CudaDiagBx , HostDiagAy, HostDiagBx , HostVecteurA , HostVecteurB, HostVecteurC, nbthread, NbDiagonale, NbWindows) ; if (hipSuccess != (errCuda = hipMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * 
sizeof(TYPE), hipMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,hipGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; } printf("Verif B versus A %d / %d \n",nbtest, maxTest) ; if (compare(HostVecteurD, HostVecteurC, SizeA + SizeB) != 0) { printf("PHL Erreur in %d - \n",nbtest) ; exit(0) ; } } printf("NB test %d / %d Pour %s\n",nbtest, maxTest, MSGMerge[AlgoMerge]) ; cleanup() ; printf("Bye Bye\n") ; return 0 ; }
e48c9fe88dde7dc269c0fcee2172c0db5e656c16.cu
#include <stdio.h> #include <stdlib.h> #include "MyInc.h" #include "main.h" int main(int argc, char ** argv) { int nbtest = 0 ; int SizeA = 800 ; // Vertical ~ Y int SizeB = 200 ; // Horizontal ~ x int nbthread = 512 ; int maxTest = 2 * (int) RemplirEnd ; cudaError_t errCuda ; // Info sur la carte getInfoCuda() ; // Hello enum EnumMerge AlgoMerge = TriMergePath_1024_shared ; // On choisi le nom de l'algo qu'on veut essayer // Rappel : //TriMergeSimpleHOST : en séquentiel : algo A //TriMergePathHOST : en séquentiel algo B //TriMergePathGPU_1024 : question 1 mémoire globale //TriMergePath_1024_shared : question 1 mémoire shared //TriWindowsGPU_Para : question 2 // La question 3 et 5 sont dans des fichiers sépares // On fait quelques calculs, même si non utilisés par l'algo cible int NbDiagonale = (SizeA + SizeB) / nbthread ; int NbWindows = NbDiagonale ; // Nombre de trie à faire, c'est le nombre de diagonale +1, sauf si la dernière diagonale est sur // le coin en bas à droite NbWindows += (((SizeA + SizeB) % nbthread) == 0)?0:1 ; // Allocation dynamique Sur le host allocVecteur(SizeA, SizeB, nbthread) ; // Vérifie que la carte et les dimensions sont compatibles avec la carte switch(AlgoMerge) { case TriMergePathGPU_1024: if ((SizeA + SizeB) > 1024) { printf("TriMergePathGPU_1024 SizeA + SizeB %d > 1024\n",SizeA + SizeB) ; exit(0) ; } break ; case TriMergePath_1024_shared: if ((SizeA + SizeB) > 1024) { printf("TriMergePath_1024_shared SizeA + SizeB %d > 1024\n",SizeA + SizeB) ; exit(0) ; } break ; } // Pour chaque génération disponible for (int e = 0 ; e < (int)RemplirEnd ; e++) { nbtest ++ ; printf("CardA %d - CardB %d - NBThread %d - Algo de remplissage %s - %s\n", SizeA, SizeB, nbthread, MSGRemplir[e],MSGMerge[AlgoMerge]) ; initVecteur(HostVecteurA, SizeA, HostVecteurB, SizeB, HostVecteurC, (enum EnumTypeRemplissage) e, 1925) ; if (cudaSuccess != (errCuda = cudaMemcpy(CudaVecteurA, HostVecteurA, SizeA * sizeof(TYPE), cudaMemcpyHostToDevice))) { printf("PB copie HostA -> cudaA - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } if (cudaSuccess != (errCuda = cudaMemcpy(CudaVecteurB, HostVecteurB, SizeB * sizeof(TYPE), cudaMemcpyHostToDevice))) { printf("PB copie HostB -> cudaB - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } // Le resultat est le meme quelque soit l'ordre dans lequel on fait le tri. 
MergeSimpleHOST(HostVecteurA, HostVecteurB, HostVecteurD, SizeA, SizeB) ; switch(AlgoMerge) { case TriMergeSimpleHOST: MergeSimpleHOST(HostVecteurA, HostVecteurB, HostVecteurC, SizeA, SizeB) ; // 1 thread pour 1 grille break ; case TriMergePathHOST: MergePathHOST(HostVecteurA, HostVecteurB, HostVecteurC, SizeA, SizeB); break ; case TriMergePathGPU_1024: MergePathGPU_1024<<<1,SizeA+SizeB>>>(CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB) ; if (cudaSuccess != (errCuda = cudaMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), cudaMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriMergePath_1024_shared: MergePathGPU_1024_shared<<<1,SizeA+SizeB, (SizeA+SizeB) * sizeof (TYPE)>>>(CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB) ; if (cudaSuccess != (errCuda = cudaMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), cudaMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriWindowsGPU_Para: MergeWindowsGPU(CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB , CudaDiagAy , CudaDiagBx , HostDiagAy, HostDiagBx , HostVecteurA , HostVecteurB, HostVecteurC, nbthread, NbDiagonale, NbWindows) ; if (cudaSuccess != (errCuda = cudaMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), cudaMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; } printf("Verif A versus B %d / %d \n",nbtest, maxTest) ; if (compare(HostVecteurD, HostVecteurC, SizeA + SizeB) != 0) { printf("Errorr in %d / %d \n",nbtest, maxTest) ; printf("Vect En erreur\n") ; // Affiche((char *)"VectC", HostVecteurC, SizeA+SizeB) ; exit(0) ; } nbtest ++ ; // Pour trier 2 fois A et B, pour vérier que ça marche dans les 2 sens printf("Card First %d - Card Second %d - NBThread %d - Algo de remplissage %s - %s\n", SizeB, SizeA, nbthread, MSGRemplir[e],MSGMerge[AlgoMerge]) ; switch(AlgoMerge) { case TriMergeSimpleHOST: MergeSimpleHOST(HostVecteurB, HostVecteurA, HostVecteurC, SizeB, SizeA) ; // 1 thread pour 1 grille break ; case TriMergePathHOST: MergePathHOST(HostVecteurB, HostVecteurA, HostVecteurC, SizeB, SizeA); break ; case TriMergePathGPU_1024: MergePathGPU_1024<<<1,SizeA+SizeB>>>(CudaVecteurB, CudaVecteurA, CudaVecteurC, SizeB, SizeA); if (cudaSuccess != (errCuda = cudaMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), cudaMemcpyDeviceToHost))) { printf("PB copie 2 cuda C -> host C - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriMergePath_1024_shared: MergePathGPU_1024_shared<<<1,SizeA+SizeB, (SizeA+SizeB) * sizeof (TYPE)>>>(CudaVecteurB, CudaVecteurA, CudaVecteurC, SizeB, SizeA) ; if (cudaSuccess != (errCuda = cudaMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), cudaMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - %s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; case TriWindowsGPU_Para: MergeWindowsGPU(CudaVecteurA, CudaVecteurB, CudaVecteurC, SizeA, SizeB , CudaDiagAy , CudaDiagBx , HostDiagAy, HostDiagBx , HostVecteurA , HostVecteurB, HostVecteurC, nbthread, NbDiagonale, NbWindows) ; if (cudaSuccess != (errCuda = cudaMemcpy(HostVecteurC, CudaVecteurC, (SizeA + SizeB) * sizeof(TYPE), cudaMemcpyDeviceToHost))) { printf("Error copie cuda C -> host C - %d - 
%s\n",errCuda,cudaGetErrorName(errCuda)) ; cleanup() ; exit(2) ; } break ; } printf("Verif B versus A %d / %d \n",nbtest, maxTest) ; if (compare(HostVecteurD, HostVecteurC, SizeA + SizeB) != 0) { printf("PHL Erreur in %d - \n",nbtest) ; exit(0) ; } } printf("NB test %d / %d Pour %s\n",nbtest, maxTest, MSGMerge[AlgoMerge]) ; cleanup() ; printf("Bye Bye\n") ; return 0 ; }
7d4be340aee8280981ed957f1e98503f40cdcaf3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Kashtanova Anna Viktorovna Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "acceleration.cuh" #include "algorithms.cuh" #include "helper.cuh" __host__ __device__ Boundaries::Boundaries(){ for (uint8_t i = 0; i < nNormals; ++i) distances[i][0] = kInfinity, distances[i][1] = -kInfinity; } __host__ __device__ bool Boundaries::hit(Ray &ray, float (&precomPuted)[2][7], uint8_t *planeIndex) const { //here ray.t is closest plane hitPoint and ray.tNearest is the other side planes hit point for (uint8_t i = 0; i < nNormals; ++i) { float tn = (distances[i][0] - precomPuted[0][i]) / precomPuted[1][i]; float tf = (distances[i][1] - precomPuted[0][i]) / precomPuted[1][i]; if (precomPuted[1][i] < 0) algorithms::swap(tn, tf); if (tn > ray.t) ray.t = tn, *planeIndex = i; if (tf < ray.tNearest) ray.tNearest = tf; if (ray.t > ray.tNearest) return false; } return true; } __host__ __device__ const float& Boundaries::at(size_t planePairNumber, size_t farOrClose) const{// must throw an exception return distances[planePairNumber][farOrClose]; } #ifdef __ACCELERATION2 template <class T> __global__ void compute_min_max(T * elements, compare comp, size_t size, T* result){ __shared__ T tmp [ threadsInblock ]; // memset(tmp, kInfinity, threadsInblock*sizeof(T) ); int tid = threadIdx.x + blockDim.x*blockIdx.x; tmp[threadIdx.x] = kInfinity; if(tid < size){ tmp[ threadIdx.x ] = elements[ tid ]; __syncthreads(); for( int i = 2; i <= threadsInblock; i*=2 ) { if( !(threadIdx.x % i) ){ if( comp(tmp[threadIdx.x + i/2], tmp[threadIdx.x] ) && (threadIdx.x + i) <= threadsInblock ) { swap(tmp[threadIdx.x], tmp[threadIdx.x + i/2]); } } } __syncthreads(); if( threadIdx.x == 0 ) result[ blockIdx.x ] = tmp[0]; } } float acceleratedMinMax(float *gpu_elements, size_t size, compare& dev_compare, compare cpu_compare){ float *result; //int size = sizeof(elements)/ sizeof(elements[0]); int sresult = (size + threadsInblock - 1)/threadsInblock; dim3 thrs(threadsInblock); dim3 blcs(sresult); //allocateGPU(&gpu_array, size*sizeof(float) ); //copyToDevice(gpu_array, &elements[0], size*sizeof(float) ); allocateGPU(&result, sresult*sizeof(float) ); compare comp; checkError( hipMemcpyFromSymbol( &comp, dev_compare, sizeof(compare) ) ); hipLaunchKernelGGL(( compute_min_max<float>) , dim3(blcs), dim3(thrs) , 0, 0, //gpu_array gpu_elements , comp , size , result); float* cpu_result = new float[sresult]; copyFromDevice( &cpu_result[0], result, sresult * sizeof( float ) ); float k = cpu_result[0]; for(int i = 0; i < sresult; ++i) { if(cpu_compare(cpu_result[i], k) && cpu_result[i] < kInfinity) k = cpu_result[i]; } delete[] cpu_result; //checkError( hipFree(gpu_array) ); checkError( hipFree(result) ); return k; } #endif
7d4be340aee8280981ed957f1e98503f40cdcaf3.cu
/* Copyright 2016 Kashtanova Anna Viktorovna Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "acceleration.cuh" #include "algorithms.cuh" #include "helper.cuh" __host__ __device__ Boundaries::Boundaries(){ for (uint8_t i = 0; i < nNormals; ++i) distances[i][0] = kInfinity, distances[i][1] = -kInfinity; } __host__ __device__ bool Boundaries::hit(Ray &ray, float (&precomPuted)[2][7], uint8_t *planeIndex) const { //here ray.t is closest plane hitPoint and ray.tNearest is the other side planes hit point for (uint8_t i = 0; i < nNormals; ++i) { float tn = (distances[i][0] - precomPuted[0][i]) / precomPuted[1][i]; float tf = (distances[i][1] - precomPuted[0][i]) / precomPuted[1][i]; if (precomPuted[1][i] < 0) algorithms::swap(tn, tf); if (tn > ray.t) ray.t = tn, *planeIndex = i; if (tf < ray.tNearest) ray.tNearest = tf; if (ray.t > ray.tNearest) return false; } return true; } __host__ __device__ const float& Boundaries::at(size_t planePairNumber, size_t farOrClose) const{// must throw an exception return distances[planePairNumber][farOrClose]; } #ifdef __ACCELERATION2 template <class T> __global__ void compute_min_max(T * elements, compare comp, size_t size, T* result){ __shared__ T tmp [ threadsInblock ]; // memset(tmp, kInfinity, threadsInblock*sizeof(T) ); int tid = threadIdx.x + blockDim.x*blockIdx.x; tmp[threadIdx.x] = kInfinity; if(tid < size){ tmp[ threadIdx.x ] = elements[ tid ]; __syncthreads(); for( int i = 2; i <= threadsInblock; i*=2 ) { if( !(threadIdx.x % i) ){ if( comp(tmp[threadIdx.x + i/2], tmp[threadIdx.x] ) && (threadIdx.x + i) <= threadsInblock ) { swap(tmp[threadIdx.x], tmp[threadIdx.x + i/2]); } } } __syncthreads(); if( threadIdx.x == 0 ) result[ blockIdx.x ] = tmp[0]; } } float acceleratedMinMax(float *gpu_elements, size_t size, compare& dev_compare, compare cpu_compare){ float *result; //int size = sizeof(elements)/ sizeof(elements[0]); int sresult = (size + threadsInblock - 1)/threadsInblock; dim3 thrs(threadsInblock); dim3 blcs(sresult); //allocateGPU(&gpu_array, size*sizeof(float) ); //copyToDevice(gpu_array, &elements[0], size*sizeof(float) ); allocateGPU(&result, sresult*sizeof(float) ); compare comp; checkError( cudaMemcpyFromSymbol( &comp, dev_compare, sizeof(compare) ) ); compute_min_max<float> <<< blcs, thrs >>>(//gpu_array gpu_elements , comp , size , result); float* cpu_result = new float[sresult]; copyFromDevice( &cpu_result[0], result, sresult * sizeof( float ) ); float k = cpu_result[0]; for(int i = 0; i < sresult; ++i) { if(cpu_compare(cpu_result[i], k) && cpu_result[i] < kInfinity) k = cpu_result[i]; } delete[] cpu_result; //checkError( cudaFree(gpu_array) ); checkError( cudaFree(result) ); return k; } #endif
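/*
   Illustrative sketch (not from the original sources): acceleratedMinMax above
   fetches the device-side comparator with cudaMemcpyFromSymbol before handing
   it to compute_min_max. Assuming `compare` is a device function pointer type
   (the actual definition lives in the project's headers), the pattern looks
   like this; the names less_than and d_less_ptr are hypothetical.
*/
#include <cuda_runtime.h>

typedef bool (*compare)(float, float);          // assumed shape of the comparator type

__device__ bool less_than(float a, float b) { return a < b; }
__device__ compare d_less_ptr = less_than;      // device symbol holding the function pointer

compare fetch_device_comparator(void)
{
    compare h_ptr = NULL;
    // Copy the value of the device symbol (a device function pointer) to the host.
    // The host can pass it on as a kernel argument but must never call it directly.
    cudaMemcpyFromSymbol(&h_ptr, d_less_ptr, sizeof(compare));
    return h_ptr;
}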
da3a84e7be4f20aad3385bd92d3381c0af818cb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> // #include <torch/extension.h> #include <torch/serialize/tensor.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include "common.h" #include "device_tensor.h" namespace { template <typename DType, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : beta(m), output(i), gradOutput(g) {} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = gradOutput[batch][plane][n]; DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta); return Float2<DType, Acctype>(g, g * c); } const Acctype beta; const DeviceTensor3 output; const DeviceTensor3 gradOutput; }; template <typename DType, typename Acctype> struct SumOp { __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = input[batch][plane][n]; return Float2<DType, Acctype>(g, g * g); } DType mean; DeviceTensor<DType, 3> input; }; // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename DType> __global__ void BatchNorm_Forward_kernel ( DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; output[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Forward_Inp_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; input[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Backward_Inp_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs) { /* declarations of the variables */ /* Get the index and 
channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(beta[c], output, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (threadIdx.x == 0) { gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; gradExs[c] = - 0.5 * invstd * invstd * dotP; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP / gamma[c]; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void BatchNorm_Backward_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (threadIdx.x == 0) { gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP * invstd; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void Expectation_Forward_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> ex, DeviceTensor<DType, 1> exs, DType norm) { int c = blockIdx.x; /* main operation */ SumOp<DType, DType> g(input); Float2<DType, DType> res = reduce<Float2<DType, DType>, SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c); DType xsum = res.v1; DType xsquare = res.v2; if (threadIdx.x == 0) { ex[c] = xsum * norm; exs[c] = xsquare * norm; } } template <typename DType> __global__ void Expectation_Backward_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs, DType norm) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * input[batch][c][x] * norm; } } } template <typename DType> __global__ void Expectation_Backward_Inp_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> output, 
DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DType norm) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; } } } } // namespace at::Tensor BatchNorm_Forward_CUDA( const at::Tensor input_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { auto output_ = at::zeros_like(input_); auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, output, input, ex, std, gamma, beta); })); AT_ASSERT(hipGetLastError() == hipSuccess); return output_; } at::Tensor BatchNorm_Forward_Inp_CUDA( const at::Tensor input_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Forward_Inp_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, input, ex, std, gamma, beta); })); AT_ASSERT(hipGetLastError() == hipSuccess); return input_; } std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor output_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { /* outputs*/ auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); auto gradinput_ = at::zeros_like(output_); auto gradgamma_ = at::zeros_like(gamma_); auto gradbeta_ = at::zeros_like(beta_); auto gradEx_ = at::zeros_like(ex_); auto gradExs_ = at::zeros_like(std_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(output_.size(1)); dim3 threads(getNumThreads(output_.size(2))); AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); 
DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Backward_Inp_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, gamma, beta, gradEx, gradExs); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> BatchNorm_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor input_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { /* outputs*/ auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); auto gradinput_ = at::zeros_like(input_); auto gradgamma_ = at::zeros_like(gamma_); auto gradbeta_ = at::zeros_like(beta_); auto gradEx_ = at::zeros_like(ex_); auto gradExs_ = at::zeros_like(std_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, gamma, beta, gradEx, gradExs); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> Expectation_Forward_CUDA( const at::Tensor input_) { /* outputs */ auto ex_ = torch::zeros({input_.size(1)}, input_.options()); auto exs_ = torch::zeros({input_.size(1)}, input_.options()); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> exs = devicetensor<scalar_t, 1>(exs_); /* kernel 
function */ hipLaunchKernelGGL(( Expectation_Forward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, input, ex, exs, norm); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {ex_, exs_}; } at::Tensor Expectation_Backward_CUDA( const at::Tensor input_, const at::Tensor gradEx_, const at::Tensor gradExs_) { /* outputs */ at::Tensor gradInput_ = at::zeros_like(input_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_); /* kernel function */ hipLaunchKernelGGL(( Expectation_Backward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradEx, gradExs, norm); })); AT_ASSERT(hipGetLastError() == hipSuccess); return gradInput_; } at::Tensor Expectation_Inp_Backward_CUDA( const at::Tensor gradInput_, const at::Tensor output_, const at::Tensor gradEx_, const at::Tensor gradExs_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { /* outputs */ //auto gradInput_ = at::zeros_like(output_); auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(output_.size(1)); dim3 threads(getNumThreads(output_.size(2))); AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] { scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ hipLaunchKernelGGL(( Expectation_Backward_Inp_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradEx, gradExs, ex, std, gamma, beta, norm); })); AT_ASSERT(hipGetLastError() == hipSuccess); return gradInput_; }
da3a84e7be4f20aad3385bd92d3381c0af818cb5.cu
#include <vector> // #include <torch/extension.h> #include <torch/serialize/tensor.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include "common.h" #include "device_tensor.h" namespace { template <typename DType, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : beta(m), output(i), gradOutput(g) {} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = gradOutput[batch][plane][n]; DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta); return Float2<DType, Acctype>(g, g * c); } const Acctype beta; const DeviceTensor3 output; const DeviceTensor3 gradOutput; }; template <typename DType, typename Acctype> struct SumOp { __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = input[batch][plane][n]; return Float2<DType, Acctype>(g, g * g); } DType mean; DeviceTensor<DType, 3> input; }; // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename DType> __global__ void BatchNorm_Forward_kernel ( DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; output[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Forward_Inp_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; input[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Backward_Inp_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, 
DeviceTensor<DType, 3>> g(beta[c], output, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (threadIdx.x == 0) { gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; gradExs[c] = - 0.5 * invstd * invstd * dotP; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP / gamma[c]; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void BatchNorm_Backward_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (threadIdx.x == 0) { gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP * invstd; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void Expectation_Forward_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> ex, DeviceTensor<DType, 1> exs, DType norm) { int c = blockIdx.x; /* main operation */ SumOp<DType, DType> g(input); Float2<DType, DType> res = reduce<Float2<DType, DType>, SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c); DType xsum = res.v1; DType xsquare = res.v2; if (threadIdx.x == 0) { ex[c] = xsum * norm; exs[c] = xsquare * norm; } } template <typename DType> __global__ void Expectation_Backward_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs, DType norm) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * input[batch][c][x] * norm; } } } template <typename DType> __global__ void Expectation_Backward_Inp_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> output, DeviceTensor<DType, 1> gradEx, DeviceTensor<DType, 1> gradExs, DeviceTensor<DType, 1> mean, 
DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DType norm) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; } } } } // namespace at::Tensor BatchNorm_Forward_CUDA( const at::Tensor input_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { auto output_ = at::zeros_like(input_); auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ BatchNorm_Forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( output, input, ex, std, gamma, beta); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output_; } at::Tensor BatchNorm_Forward_Inp_CUDA( const at::Tensor input_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ BatchNorm_Forward_Inp_kernel<scalar_t><<<blocks, threads, 0, stream>>>( input, ex, std, gamma, beta); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return input_; } std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor output_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { /* outputs*/ auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); auto gradinput_ = at::zeros_like(output_); auto gradgamma_ = at::zeros_like(gamma_); auto gradbeta_ = at::zeros_like(beta_); auto gradEx_ = at::zeros_like(ex_); auto gradExs_ = at::zeros_like(std_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(output_.size(1)); dim3 threads(getNumThreads(output_.size(2))); AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = 
devicetensor<scalar_t, 1>(gradbeta_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_); /* kernel function */ BatchNorm_Backward_Inp_kernel<scalar_t> <<<blocks, threads, 0, stream>>>( gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, gamma, beta, gradEx, gradExs); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> BatchNorm_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor input_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { /* outputs*/ auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); auto gradinput_ = at::zeros_like(input_); auto gradgamma_ = at::zeros_like(gamma_); auto gradbeta_ = at::zeros_like(beta_); auto gradEx_ = at::zeros_like(ex_); auto gradExs_ = at::zeros_like(std_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs = devicetensor<scalar_t, 1>(gradExs_); /* kernel function */ BatchNorm_Backward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>( gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, gamma, beta, gradEx, gradExs); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> Expectation_Forward_CUDA( const at::Tensor input_) { /* outputs */ auto ex_ = torch::zeros({input_.size(1)}, input_.options()); auto exs_ = torch::zeros({input_.size(1)}, input_.options()); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> exs = devicetensor<scalar_t, 1>(exs_); /* kernel function */ Expectation_Forward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(input, ex, exs, norm); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {ex_, exs_}; } at::Tensor Expectation_Backward_CUDA( const at::Tensor input_, const at::Tensor gradEx_, const 
at::Tensor gradExs_) { /* outputs */ at::Tensor gradInput_ = at::zeros_like(input_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_); /* kernel function */ Expectation_Backward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(gradInput, input, gradEx, gradExs, norm); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return gradInput_; } at::Tensor Expectation_Inp_Backward_CUDA( const at::Tensor gradInput_, const at::Tensor output_, const at::Tensor gradEx_, const at::Tensor gradExs_, const at::Tensor ex_, const at::Tensor exs_, const at::Tensor gamma_, const at::Tensor beta_, float eps) { /* outputs */ //auto gradInput_ = at::zeros_like(output_); auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(output_.size(1)); dim3 threads(getNumThreads(output_.size(2))); AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] { scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 1> gradEx = devicetensor<scalar_t, 1>(gradEx_); DeviceTensor<scalar_t, 1> gradExs =devicetensor<scalar_t, 1>(gradExs_); DeviceTensor<scalar_t, 1> ex = devicetensor<scalar_t, 1>(ex_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ Expectation_Backward_Inp_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(gradInput, input, gradEx, gradExs, ex, std, gamma, beta, norm); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return gradInput_; }
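All of the BatchNorm/Expectation wrappers above derive the per-channel standard deviation from the accumulated moments as std = sqrt(E[x^2] - E[x]^2 + eps) before handing DeviceTensor views to the kernels. The following is a minimal, hedged sketch of that moment-to-std step as a standalone CUDA kernel; the kernel name, raw-pointer interface, and launch shape are illustrative assumptions and are not part of the extension above.

// Sketch only: one thread per channel turns the first/second moments into the
// standard deviation, mirroring (exs_ - ex_*ex_ + eps).sqrt() in the host code.
__global__ void moments_to_std(const float* ex, const float* exs,
                               float* std_out, float eps, int channels) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < channels) {
        // var = E[x^2] - (E[x])^2; eps matches the epsilon added before the sqrt.
        float var = exs[c] - ex[c] * ex[c];
        std_out[c] = sqrtf(var + eps);
    }
}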
8a307e99f8ebbfb930642cca736407149d04fc43.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "timer.h" #include "kernels.h" //This is a sample generation of the input matrix. The assignment need not be evaluated on the same matrix. void fill_matrix(float *mat, int M, int N) { for(int i=0; i<M; i++) for(int j=0; j<N; j++) { mat[i*N +j] = i + 2.3f * j; } } // Print the matrix void print_matrix(float *mat, int M, int N) { for(int i=0; i < M; i++) { for(int j=0; j < N; j++) { printf("%4.3g ", mat[j + i*N]); } printf("\n"); } printf("\n"); } // Verify the correctness by comparing the sequential output with parallel output bool compare_matrices(float *gpu, float *ref, int rows, int cols) { for(int i=0; i < rows; i++) { for(int j=0; j < cols; j++) { if (ref[i*cols + j] != gpu[i*cols +j]) { return false; } } } return true; // generated output matches expected output } // Generating expected output void transpose_CPU(float in[], float out[], int M, int N) { for(int i=0; i<M; i++) for(int j=0; j<N; j++) out[j*M +i] = in[i * N + j]; } int main(int argc, char** argv) { // specify the dimensions of the input matrix const int M = 8192;//32768; // number of rows const int N = 2048;//8192; // number of columns unsigned numbytes = M * N * sizeof(float); float *in = (float *) malloc(numbytes); float *out = (float *) malloc(numbytes); float *out1 = (float *) malloc(numbytes); float *gold = (float *) malloc(numbytes); fill_matrix(in, M, N); CPUTimer cputimer; cputimer.Start(); transpose_CPU(in, gold, M, N); cputimer.Stop(); printf("The sequential code ran in %f ms\n", cputimer.Elapsed()*1000); // print_matrix(in, M, N); // printing the input matrix //print_matrix(gold, N, M); // printing expected output matrix float *d_in, *d_out1, *d_out2 ; hipError_t err; hipMalloc(&d_in, numbytes); hipMalloc(&d_out1, numbytes); hipMalloc(&d_out2, numbytes); hipMemset(d_out1, 0, numbytes); hipMemset(d_out2, 0, numbytes); hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice); GPUTimer timer; timer.Start(); // launching the kernel int numThreads = 256; int count = (M/numThreads)+1; hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(count), dim3(numThreads), 0, 0, d_in, d_out1, M, N); timer.Stop(); /* Print the last error encountered -- helpful for debugging */ err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy(out, d_out1, numbytes, hipMemcpyDeviceToHost); //print_matrix(out, N, M); printf("Result of <transpose_parallel_per_row>: %s\n",compare_matrices(out, gold, N, M) ? "Success" : "Failure"); /* Success <--> correct output */ printf("The kernel <transpose_parallel_per_row> ran in: %f ms\n", timer.Elapsed()); // Sample values for K1 and K2 (subject to change). const int K1 = 32; // should be a divisor of M const int K2 = 8; // should be a divisor of N dim3 blocks(M/K1,N/K2); // blocks per grid dim3 threads(K1, K2); // threads per block timer.Start(); // launching the kernel hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks),dim3(threads), 0, 0, d_in, d_out2,M,N,K1,K2); timer.Stop(); /* Print the last error encountered -- helpful for debugging */ err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy(out1, d_out2, numbytes, hipMemcpyDeviceToHost); printf("Result of <transpose_parallel_per_element>: %s\n",compare_matrices(out1, gold, N, M) ? 
"Success" : "Failure");/* Success <--> correct output */ //print_matrix(out1, N, M); printf("The kernel <transpose_parallel_per_element> ran in: %f ms\n", timer.Elapsed()); return 0; }
8a307e99f8ebbfb930642cca736407149d04fc43.cu
#include <stdio.h> #include "timer.h" #include "kernels.h" //This is a sample generation of the input matrix. The assignment need not be evaluated on the same matrix. void fill_matrix(float *mat, int M, int N) { for(int i=0; i<M; i++) for(int j=0; j<N; j++) { mat[i*N +j] = i + 2.3f * j; } } // Print the matrix void print_matrix(float *mat, int M, int N) { for(int i=0; i < M; i++) { for(int j=0; j < N; j++) { printf("%4.3g ", mat[j + i*N]); } printf("\n"); } printf("\n"); } // Verify the correctness by comparing the sequential output with parallel output bool compare_matrices(float *gpu, float *ref, int rows, int cols) { for(int i=0; i < rows; i++) { for(int j=0; j < cols; j++) { if (ref[i*cols + j] != gpu[i*cols +j]) { return false; } } } return true; // generated output matches expected output } // Generating expected output void transpose_CPU(float in[], float out[], int M, int N) { for(int i=0; i<M; i++) for(int j=0; j<N; j++) out[j*M +i] = in[i * N + j]; } int main(int argc, char** argv) { // specify the dimensions of the input matrix const int M = 8192;//32768; // number of rows const int N = 2048;//8192; // number of columns unsigned numbytes = M * N * sizeof(float); float *in = (float *) malloc(numbytes); float *out = (float *) malloc(numbytes); float *out1 = (float *) malloc(numbytes); float *gold = (float *) malloc(numbytes); fill_matrix(in, M, N); CPUTimer cputimer; cputimer.Start(); transpose_CPU(in, gold, M, N); cputimer.Stop(); printf("The sequential code ran in %f ms\n", cputimer.Elapsed()*1000); // print_matrix(in, M, N); // printing the input matrix //print_matrix(gold, N, M); // printing expected output matrix float *d_in, *d_out1, *d_out2 ; cudaError_t err; cudaMalloc(&d_in, numbytes); cudaMalloc(&d_out1, numbytes); cudaMalloc(&d_out2, numbytes); cudaMemset(d_out1, 0, numbytes); cudaMemset(d_out2, 0, numbytes); cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice); GPUTimer timer; timer.Start(); // launching the kernel int numThreads = 256; int count = (M/numThreads)+1; transpose_parallel_per_row<<<count, numThreads>>>(d_in, d_out1, M, N); timer.Stop(); /* Print the last error encountered -- helpful for debugging */ err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy(out, d_out1, numbytes, cudaMemcpyDeviceToHost); //print_matrix(out, N, M); printf("Result of <transpose_parallel_per_row>: %s\n",compare_matrices(out, gold, N, M) ? "Success" : "Failure"); /* Success <--> correct output */ printf("The kernel <transpose_parallel_per_row> ran in: %f ms\n", timer.Elapsed()); // Sample values for K1 and K2 (subject to change). const int K1 = 32; // should be a divisor of M const int K2 = 8; // should be a divisor of N dim3 blocks(M/K1,N/K2); // blocks per grid dim3 threads(K1, K2); // threads per block timer.Start(); // launching the kernel transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out2,M,N,K1,K2); timer.Stop(); /* Print the last error encountered -- helpful for debugging */ err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy(out1, d_out2, numbytes, cudaMemcpyDeviceToHost); printf("Result of <transpose_parallel_per_element>: %s\n",compare_matrices(out1, gold, N, M) ? "Success" : "Failure");/* Success <--> correct output */ //print_matrix(out1, N, M); printf("The kernel <transpose_parallel_per_element> ran in: %f ms\n", timer.Elapsed()); return 0; }
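The transpose driver above launches transpose_parallel_per_row and transpose_parallel_per_element from kernels.h, which is not included in this record. Below is a plausible minimal sketch of the two kernels, consistent with the launch configurations used in main() (one thread per row for the first; an M/K1 x N/K2 grid of K1 x K2 blocks for the second). The signatures and bodies are assumptions, not the actual contents of kernels.h.

// Assumed kernel shapes matching the <<<count, numThreads>>> and
// <<<blocks, threads>>> launches in main(); kernels.h is not shown here.
__global__ void transpose_parallel_per_row(const float* in, float* out, int M, int N) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;   // one thread transposes a full row
    if (row < M)
        for (int col = 0; col < N; ++col)
            out[col * M + row] = in[row * N + col];
}

__global__ void transpose_parallel_per_element(const float* in, float* out,
                                               int M, int N, int K1, int K2) {
    int row = blockIdx.x * K1 + threadIdx.x;           // grid is (M/K1, N/K2), block is (K1, K2)
    int col = blockIdx.y * K2 + threadIdx.y;
    if (row < M && col < N)
        out[col * M + row] = in[row * N + col];        // same mapping as transpose_CPU
}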
efc022e3808a775b0ad7da2f03a795554c4ce1c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel3_xdir; int xdim0_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim0_advec_cell_kernel3_xdir; int ydim0_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim1_advec_cell_kernel3_xdir; int xdim1_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim1_advec_cell_kernel3_xdir; int ydim1_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim2_advec_cell_kernel3_xdir; int xdim2_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim2_advec_cell_kernel3_xdir; int ydim2_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim3_advec_cell_kernel3_xdir; int xdim3_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim3_advec_cell_kernel3_xdir; int ydim3_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim4_advec_cell_kernel3_xdir; int xdim4_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim4_advec_cell_kernel3_xdir; int ydim4_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim5_advec_cell_kernel3_xdir; int xdim5_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim5_advec_cell_kernel3_xdir; int ydim5_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim6_advec_cell_kernel3_xdir; int xdim6_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim6_advec_cell_kernel3_xdir; int ydim6_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim7_advec_cell_kernel3_xdir; int xdim7_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim7_advec_cell_kernel3_xdir; int ydim7_advec_cell_kernel3_xdir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel3_xdir * (y) + \ xdim0_advec_cell_kernel3_xdir * ydim0_advec_cell_kernel3_xdir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel3_xdir * (y) + \ xdim1_advec_cell_kernel3_xdir * ydim1_advec_cell_kernel3_xdir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel3_xdir * (y) + \ xdim2_advec_cell_kernel3_xdir * ydim2_advec_cell_kernel3_xdir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel3_xdir * (y) + \ xdim3_advec_cell_kernel3_xdir * ydim3_advec_cell_kernel3_xdir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel3_xdir * (y) + \ xdim4_advec_cell_kernel3_xdir * ydim4_advec_cell_kernel3_xdir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel3_xdir * (y) + \ xdim5_advec_cell_kernel3_xdir * ydim5_advec_cell_kernel3_xdir * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_advec_cell_kernel3_xdir * (y) + \ xdim6_advec_cell_kernel3_xdir * ydim6_advec_cell_kernel3_xdir * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_advec_cell_kernel3_xdir * (y) + \ xdim7_advec_cell_kernel3_xdir * ydim7_advec_cell_kernel3_xdir * (z)) // user function __device__ inline void advec_cell_kernel3_xdir_gpu(const double *vol_flux_x, const double *pre_vol, const int *xx, const double *vertexdx, const double *density1, const double *energy1, double *mass_flux_x, double *ener_flux) { double sigmat, sigmav, sigmam, sigma3, sigma4; double diffuw, diffdw, limiter; double one_by_six = 1.0 / 6.0; int x_max = field.x_max; int upwind, donor, downwind, dif; if (vol_flux_x[OPS_ACC0(0, 0, 0)] > 0.0) { upwind = -2; donor = -1; downwind = 0; dif = donor; } else if (xx[OPS_ACC2(1, 0, 0)] < x_max + 2 - 2) { upwind = 1; donor = 0; downwind = -1; dif = upwind; } else { upwind = 0; donor = 0; downwind = -1; dif = upwind; } sigmat = fabs(vol_flux_x[OPS_ACC0(0, 0, 0)]) / 
pre_vol[OPS_ACC1(donor, 0, 0)]; sigma3 = (1.0 + sigmat) * (vertexdx[OPS_ACC3(0, 0, 0)] / vertexdx[OPS_ACC3(dif, 0, 0)]); sigma4 = 2.0 - sigmat; sigmav = sigmat; diffuw = density1[OPS_ACC4(donor, 0, 0)] - density1[OPS_ACC4(upwind, 0, 0)]; diffdw = density1[OPS_ACC4(downwind, 0, 0)] - density1[OPS_ACC4(donor, 0, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmav) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; mass_flux_x[OPS_ACC6(0, 0, 0)] = (vol_flux_x[OPS_ACC0(0, 0, 0)]) * (density1[OPS_ACC4(donor, 0, 0)] + limiter); sigmam = fabs(mass_flux_x[OPS_ACC6(0, 0, 0)]) / (density1[OPS_ACC4(donor, 0, 0)] * pre_vol[OPS_ACC1(donor, 0, 0)]); diffuw = energy1[OPS_ACC5(donor, 0, 0)] - energy1[OPS_ACC5(upwind, 0, 0)]; diffdw = energy1[OPS_ACC5(downwind, 0, 0)] - energy1[OPS_ACC5(donor, 0, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmam) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; ener_flux[OPS_ACC7(0, 0, 0)] = mass_flux_x[OPS_ACC6(0, 0, 0)] * (energy1[OPS_ACC5(donor, 0, 0)] + limiter); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_advec_cell_kernel3_xdir( const double *__restrict arg0, const double *__restrict arg1, const int *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim0_advec_cell_kernel3_xdir * ydim0_advec_cell_kernel3_xdir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim1_advec_cell_kernel3_xdir * ydim1_advec_cell_kernel3_xdir; arg2 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim2_advec_cell_kernel3_xdir + idx_z * 0 * 1 * xdim2_advec_cell_kernel3_xdir * ydim2_advec_cell_kernel3_xdir; arg3 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim3_advec_cell_kernel3_xdir + idx_z * 0 * 1 * xdim3_advec_cell_kernel3_xdir * ydim3_advec_cell_kernel3_xdir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim4_advec_cell_kernel3_xdir * ydim4_advec_cell_kernel3_xdir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim5_advec_cell_kernel3_xdir * ydim5_advec_cell_kernel3_xdir; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim6_advec_cell_kernel3_xdir * ydim6_advec_cell_kernel3_xdir; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim7_advec_cell_kernel3_xdir * ydim7_advec_cell_kernel3_xdir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel3_xdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_cell_kernel3_xdir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_advec_cell_kernel3_xdir_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; 
ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 8, range, 111)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(111, "advec_cell_kernel3_xdir"); OPS_kernels[111].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel3_xdir_h || ydim0 != ydim0_advec_cell_kernel3_xdir_h || xdim1 != xdim1_advec_cell_kernel3_xdir_h || ydim1 != ydim1_advec_cell_kernel3_xdir_h || xdim2 != xdim2_advec_cell_kernel3_xdir_h || ydim2 != ydim2_advec_cell_kernel3_xdir_h || xdim3 != xdim3_advec_cell_kernel3_xdir_h || ydim3 != ydim3_advec_cell_kernel3_xdir_h || xdim4 != xdim4_advec_cell_kernel3_xdir_h || ydim4 != ydim4_advec_cell_kernel3_xdir_h || xdim5 != xdim5_advec_cell_kernel3_xdir_h || ydim5 != ydim5_advec_cell_kernel3_xdir_h || xdim6 != xdim6_advec_cell_kernel3_xdir_h || ydim6 != ydim6_advec_cell_kernel3_xdir_h || xdim7 != xdim7_advec_cell_kernel3_xdir_h || ydim7 != ydim7_advec_cell_kernel3_xdir_h) { hipMemcpyToSymbol(xdim0_advec_cell_kernel3_xdir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel3_xdir_h = xdim0; hipMemcpyToSymbol(ydim0_advec_cell_kernel3_xdir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel3_xdir_h = ydim0; hipMemcpyToSymbol(xdim1_advec_cell_kernel3_xdir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel3_xdir_h = xdim1; hipMemcpyToSymbol(ydim1_advec_cell_kernel3_xdir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel3_xdir_h = ydim1; hipMemcpyToSymbol(xdim2_advec_cell_kernel3_xdir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel3_xdir_h = xdim2; hipMemcpyToSymbol(ydim2_advec_cell_kernel3_xdir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel3_xdir_h = ydim2; hipMemcpyToSymbol(xdim3_advec_cell_kernel3_xdir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel3_xdir_h = 
xdim3; hipMemcpyToSymbol(ydim3_advec_cell_kernel3_xdir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel3_xdir_h = ydim3; hipMemcpyToSymbol(xdim4_advec_cell_kernel3_xdir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel3_xdir_h = xdim4; hipMemcpyToSymbol(ydim4_advec_cell_kernel3_xdir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel3_xdir_h = ydim4; hipMemcpyToSymbol(xdim5_advec_cell_kernel3_xdir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel3_xdir_h = xdim5; hipMemcpyToSymbol(ydim5_advec_cell_kernel3_xdir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel3_xdir_h = ydim5; hipMemcpyToSymbol(xdim6_advec_cell_kernel3_xdir, &xdim6, sizeof(int)); xdim6_advec_cell_kernel3_xdir_h = xdim6; hipMemcpyToSymbol(ydim6_advec_cell_kernel3_xdir, &ydim6, sizeof(int)); ydim6_advec_cell_kernel3_xdir_h = ydim6; hipMemcpyToSymbol(xdim7_advec_cell_kernel3_xdir, &xdim7, sizeof(int)); xdim7_advec_cell_kernel3_xdir_h = xdim7; hipMemcpyToSymbol(ydim7_advec_cell_kernel3_xdir, &ydim7, sizeof(int)); ydim7_advec_cell_kernel3_xdir_h = ydim7; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size); char *p_a[8]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5 + dat5 * 
args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; int base7 = args[7].dat->base_offset + dat7 * 1 * (start[0] * args[7].stencil->stride[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2]); p_a[7] = (char *)args[7].data_d + base7; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[111].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_cell_kernel3_xdir), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[111].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[111].mpi_time += t2 - t1; OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg7); } } #ifdef OPS_LAZY void ops_par_loop_advec_cell_kernel3_xdir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 111; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 111; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = 
((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index; desc->args[7] = arg7; desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index; desc->function = ops_par_loop_advec_cell_kernel3_xdir_execute; if (OPS_diags > 1) { ops_timing_realloc(111, "advec_cell_kernel3_xdir"); } ops_enqueue_kernel(desc); } #endif
efc022e3808a775b0ad7da2f03a795554c4ce1c1.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel3_xdir; int xdim0_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim0_advec_cell_kernel3_xdir; int ydim0_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim1_advec_cell_kernel3_xdir; int xdim1_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim1_advec_cell_kernel3_xdir; int ydim1_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim2_advec_cell_kernel3_xdir; int xdim2_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim2_advec_cell_kernel3_xdir; int ydim2_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim3_advec_cell_kernel3_xdir; int xdim3_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim3_advec_cell_kernel3_xdir; int ydim3_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim4_advec_cell_kernel3_xdir; int xdim4_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim4_advec_cell_kernel3_xdir; int ydim4_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim5_advec_cell_kernel3_xdir; int xdim5_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim5_advec_cell_kernel3_xdir; int ydim5_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim6_advec_cell_kernel3_xdir; int xdim6_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim6_advec_cell_kernel3_xdir; int ydim6_advec_cell_kernel3_xdir_h = -1; __constant__ int xdim7_advec_cell_kernel3_xdir; int xdim7_advec_cell_kernel3_xdir_h = -1; __constant__ int ydim7_advec_cell_kernel3_xdir; int ydim7_advec_cell_kernel3_xdir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel3_xdir * (y) + \ xdim0_advec_cell_kernel3_xdir * ydim0_advec_cell_kernel3_xdir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel3_xdir * (y) + \ xdim1_advec_cell_kernel3_xdir * ydim1_advec_cell_kernel3_xdir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel3_xdir * (y) + \ xdim2_advec_cell_kernel3_xdir * ydim2_advec_cell_kernel3_xdir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel3_xdir * (y) + \ xdim3_advec_cell_kernel3_xdir * ydim3_advec_cell_kernel3_xdir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel3_xdir * (y) + \ xdim4_advec_cell_kernel3_xdir * ydim4_advec_cell_kernel3_xdir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel3_xdir * (y) + \ xdim5_advec_cell_kernel3_xdir * ydim5_advec_cell_kernel3_xdir * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_advec_cell_kernel3_xdir * (y) + \ xdim6_advec_cell_kernel3_xdir * ydim6_advec_cell_kernel3_xdir * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_advec_cell_kernel3_xdir * (y) + \ xdim7_advec_cell_kernel3_xdir * ydim7_advec_cell_kernel3_xdir * (z)) // user function __device__ inline void advec_cell_kernel3_xdir_gpu(const double *vol_flux_x, const double *pre_vol, const int *xx, const double *vertexdx, const double *density1, const double *energy1, double *mass_flux_x, double *ener_flux) { double sigmat, sigmav, sigmam, sigma3, sigma4; double diffuw, diffdw, limiter; double one_by_six = 1.0 / 6.0; int x_max = field.x_max; int upwind, donor, downwind, dif; if (vol_flux_x[OPS_ACC0(0, 0, 0)] > 0.0) { upwind = -2; donor = -1; downwind = 0; dif = donor; } else if (xx[OPS_ACC2(1, 0, 0)] < x_max + 2 - 2) { upwind = 1; donor = 0; downwind = -1; dif = upwind; } else { upwind = 0; donor = 0; downwind = -1; dif = upwind; } sigmat = fabs(vol_flux_x[OPS_ACC0(0, 0, 0)]) / pre_vol[OPS_ACC1(donor, 0, 0)]; sigma3 = (1.0 + sigmat) * (vertexdx[OPS_ACC3(0, 0, 0)] / 
vertexdx[OPS_ACC3(dif, 0, 0)]); sigma4 = 2.0 - sigmat; sigmav = sigmat; diffuw = density1[OPS_ACC4(donor, 0, 0)] - density1[OPS_ACC4(upwind, 0, 0)]; diffdw = density1[OPS_ACC4(downwind, 0, 0)] - density1[OPS_ACC4(donor, 0, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmav) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; mass_flux_x[OPS_ACC6(0, 0, 0)] = (vol_flux_x[OPS_ACC0(0, 0, 0)]) * (density1[OPS_ACC4(donor, 0, 0)] + limiter); sigmam = fabs(mass_flux_x[OPS_ACC6(0, 0, 0)]) / (density1[OPS_ACC4(donor, 0, 0)] * pre_vol[OPS_ACC1(donor, 0, 0)]); diffuw = energy1[OPS_ACC5(donor, 0, 0)] - energy1[OPS_ACC5(upwind, 0, 0)]; diffdw = energy1[OPS_ACC5(downwind, 0, 0)] - energy1[OPS_ACC5(donor, 0, 0)]; if ((diffuw * diffdw) > 0.0) limiter = (1.0 - sigmam) * SIGN(1.0, diffdw) * MIN(MIN(fabs(diffuw), fabs(diffdw)), one_by_six * (sigma3 * fabs(diffuw) + sigma4 * fabs(diffdw))); else limiter = 0.0; ener_flux[OPS_ACC7(0, 0, 0)] = mass_flux_x[OPS_ACC6(0, 0, 0)] * (energy1[OPS_ACC5(donor, 0, 0)] + limiter); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 __global__ void ops_advec_cell_kernel3_xdir( const double *__restrict arg0, const double *__restrict arg1, const int *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim0_advec_cell_kernel3_xdir * ydim0_advec_cell_kernel3_xdir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim1_advec_cell_kernel3_xdir * ydim1_advec_cell_kernel3_xdir; arg2 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim2_advec_cell_kernel3_xdir + idx_z * 0 * 1 * xdim2_advec_cell_kernel3_xdir * ydim2_advec_cell_kernel3_xdir; arg3 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim3_advec_cell_kernel3_xdir + idx_z * 0 * 1 * xdim3_advec_cell_kernel3_xdir * ydim3_advec_cell_kernel3_xdir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim4_advec_cell_kernel3_xdir * ydim4_advec_cell_kernel3_xdir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim5_advec_cell_kernel3_xdir * ydim5_advec_cell_kernel3_xdir; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim6_advec_cell_kernel3_xdir * ydim6_advec_cell_kernel3_xdir; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel3_xdir + idx_z * 1 * 1 * xdim7_advec_cell_kernel3_xdir * ydim7_advec_cell_kernel3_xdir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel3_xdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_cell_kernel3_xdir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { #else void ops_par_loop_advec_cell_kernel3_xdir_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; 
ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; ops_arg arg7 = desc->args[7]; #endif // Timing double t1, t2, c1, c2; ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 8, range, 111)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(111, "advec_cell_kernel3_xdir"); OPS_kernels[111].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel3_xdir_h || ydim0 != ydim0_advec_cell_kernel3_xdir_h || xdim1 != xdim1_advec_cell_kernel3_xdir_h || ydim1 != ydim1_advec_cell_kernel3_xdir_h || xdim2 != xdim2_advec_cell_kernel3_xdir_h || ydim2 != ydim2_advec_cell_kernel3_xdir_h || xdim3 != xdim3_advec_cell_kernel3_xdir_h || ydim3 != ydim3_advec_cell_kernel3_xdir_h || xdim4 != xdim4_advec_cell_kernel3_xdir_h || ydim4 != ydim4_advec_cell_kernel3_xdir_h || xdim5 != xdim5_advec_cell_kernel3_xdir_h || ydim5 != ydim5_advec_cell_kernel3_xdir_h || xdim6 != xdim6_advec_cell_kernel3_xdir_h || ydim6 != ydim6_advec_cell_kernel3_xdir_h || xdim7 != xdim7_advec_cell_kernel3_xdir_h || ydim7 != ydim7_advec_cell_kernel3_xdir_h) { cudaMemcpyToSymbol(xdim0_advec_cell_kernel3_xdir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel3_xdir_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_cell_kernel3_xdir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel3_xdir_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_cell_kernel3_xdir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel3_xdir_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_cell_kernel3_xdir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel3_xdir_h = ydim1; cudaMemcpyToSymbol(xdim2_advec_cell_kernel3_xdir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel3_xdir_h = xdim2; cudaMemcpyToSymbol(ydim2_advec_cell_kernel3_xdir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel3_xdir_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_cell_kernel3_xdir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel3_xdir_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_cell_kernel3_xdir, &ydim3, sizeof(int)); 
ydim3_advec_cell_kernel3_xdir_h = ydim3; cudaMemcpyToSymbol(xdim4_advec_cell_kernel3_xdir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel3_xdir_h = xdim4; cudaMemcpyToSymbol(ydim4_advec_cell_kernel3_xdir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel3_xdir_h = ydim4; cudaMemcpyToSymbol(xdim5_advec_cell_kernel3_xdir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel3_xdir_h = xdim5; cudaMemcpyToSymbol(ydim5_advec_cell_kernel3_xdir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel3_xdir_h = ydim5; cudaMemcpyToSymbol(xdim6_advec_cell_kernel3_xdir, &xdim6, sizeof(int)); xdim6_advec_cell_kernel3_xdir_h = xdim6; cudaMemcpyToSymbol(ydim6_advec_cell_kernel3_xdir, &ydim6, sizeof(int)); ydim6_advec_cell_kernel3_xdir_h = ydim6; cudaMemcpyToSymbol(xdim7_advec_cell_kernel3_xdir, &xdim7, sizeof(int)); xdim7_advec_cell_kernel3_xdir_h = xdim7; cudaMemcpyToSymbol(ydim7_advec_cell_kernel3_xdir, &ydim7, sizeof(int)); ydim7_advec_cell_kernel3_xdir_h = ydim7; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size); int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size); int dat7 = (OPS_soa ? args[7].dat->type_size : args[7].dat->elem_size); char *p_a[8]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); base5 = base5 + dat5 * 
args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2]); p_a[5] = (char *)args[5].data_d + base5; int base6 = args[6].dat->base_offset + dat6 * 1 * (start[0] * args[6].stencil->stride[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2]); p_a[6] = (char *)args[6].data_d + base6; int base7 = args[7].dat->base_offset + dat7 * 1 * (start[0] * args[7].stencil->stride[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2]); p_a[7] = (char *)args[7].data_d + base7; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 8); ops_halo_exchanges(args, 8, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[111].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_cell_kernel3_xdir<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[111].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 8); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[111].mpi_time += t2 - t1; OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[111].transfer += ops_compute_transfer(dim, start, end, &arg7); } } #ifdef OPS_LAZY void ops_par_loop_advec_cell_kernel3_xdir(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 111; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 111; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 8; desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; desc->hash = ((desc->hash << 5) + desc->hash) + 
arg6.dat->index; desc->args[7] = arg7; desc->hash = ((desc->hash << 5) + desc->hash) + arg7.dat->index; desc->function = ops_par_loop_advec_cell_kernel3_xdir_execute; if (OPS_diags > 1) { ops_timing_realloc(111, "advec_cell_kernel3_xdir"); } ops_enqueue_kernel(desc); } #endif
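The generated advec_cell_kernel3_xdir code above relies on the OPS_ACCn macros, which flatten a (dx, dy, dz) stencil offset into a 1D index using per-argument pads (xdimN, ydimN) held in __constant__ memory, after the wrapper has already advanced each argument pointer to the current grid point. The sketch below restates that indexing pattern outside the generated code; the _demo names and the copy_shift_x kernel are hypothetical and only illustrate the layout, they do not appear in the OPS library.

// Hypothetical demo of the pad-based indexing used by OPS_ACCn: data is stored
// x-fastest with padded row length xdim and padded plane size xdim*ydim.
__constant__ int xdim_demo;
__constant__ int ydim_demo;

__device__ inline int acc_demo(int dx, int dy, int dz) {
    return dx + xdim_demo * dy + xdim_demo * ydim_demo * dz;
}

__global__ void copy_shift_x(const double* __restrict__ in, double* __restrict__ out,
                             int size0, int size1, int size2) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < size0 && y < size1 && z < size2) {
        int base = x + xdim_demo * y + xdim_demo * ydim_demo * z;
        out[base] = in[base + acc_demo(1, 0, 0)];  // read the x+1 neighbour, like OPS_ACCn(1,0,0)
    }
}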
98bad354a175659c95532de8d7f4a7984aa5ec79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialUpSamplingNearest.cu" #else #include "../common.h" static inline void THNN_(SpatialUpSamplingNearest_shapeCheck) (THCState *state,THCTensor *input, THCTensor *gradOutput, int scale_factor) { THArgCheck(input != NULL, 2, "4D input tensor expected but got NULL"); THArgCheck(scale_factor > 1, 4, "scale_factor must be greater than 1, but got: %d", scale_factor); THCUNN_argCheck(state, input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D input tensor expected but got: %s"); if (input->nDimension == 3) { int nChannels = THCTensor_(size)(state, input, 0); int inputHeight = THCTensor_(size)(state, input, 1); int inputWidth = THCTensor_(size)(state, input, 2); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 3, 0, nChannels); THCUNN_check_dim_size(state, gradOutput, 3, 1, outputHeight); THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth); } } else { int nBatch = THCTensor_(size)(state, input, 0); int nChannels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } } void THNN_(SpatialUpSamplingNearest_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int scale_factor) { THCTensor_(zero)(state, output); THCUNN_assertSameGPU(state, 2, input, output); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, NULL, scale_factor); int inputHeight = THCTensor_(size)(state, input, input->nDimension-2); int inputWidth = THCTensor_(size)(state, input, input->nDimension-1); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (input->nDimension == 3) { THCTensor_(resize3d)(state, output, THCTensor_(size)(state, input, 0), outputHeight, outputWidth); } else { THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); } input = THCTensor_(newContiguous)(state, input); // This is for allocating output Tensor long no_elements = 1; for(int i = 0; i < input->nDimension; i++){ no_elements *= input->size[i]; } no_elements *= scale_factor * scale_factor; int d1; int d2; int d3; if (input->nDimension == 3) { d1 = output->size[0]; d2 = output->size[1]; d3 = output->size[2]; } else { d1 = output->size[1]; d2 = output->size[2]; d3 = output->size[3]; } real *input_data = THCTensor_(data)(state, input); real *output_data = THCTensor_(data)(state, output); // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! 
aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: hipLaunchKernelGGL(( upscale), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(hipGetLastError()); // final cut: THCTensor_(free)(state, input); } void THNN_(SpatialUpSamplingNearest_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int scale_factor) { THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, gradOutput, scale_factor); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); real *gradInput_data = THCTensor_(data)(state, gradInput); real *gradOutput_data = THCTensor_(data)(state, gradOutput); long no_elements = 1; for(int i = 0; i < gradInput->nDimension; i++){ no_elements *= gradInput->size[i]; } int d1; int d2; int d3; if (gradInput->nDimension == 3) { d1 = gradInput->size[0]; d2 = gradInput->size[1]; d3 = gradInput->size[2]; } else { d1 = gradInput->size[1]; d2 = gradInput->size[2]; d3 = gradInput->size[3]; } // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: hipLaunchKernelGGL(( downscale<real ,accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(hipGetLastError()); } #endif
98bad354a175659c95532de8d7f4a7984aa5ec79.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialUpSamplingNearest.cu" #else #include "../common.h" static inline void THNN_(SpatialUpSamplingNearest_shapeCheck) (THCState *state,THCTensor *input, THCTensor *gradOutput, int scale_factor) { THArgCheck(input != NULL, 2, "4D input tensor expected but got NULL"); THArgCheck(scale_factor > 1, 4, "scale_factor must be greater than 1, but got: %d", scale_factor); THCUNN_argCheck(state, input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D input tensor expected but got: %s"); if (input->nDimension == 3) { int nChannels = THCTensor_(size)(state, input, 0); int inputHeight = THCTensor_(size)(state, input, 1); int inputWidth = THCTensor_(size)(state, input, 2); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 3, 0, nChannels); THCUNN_check_dim_size(state, gradOutput, 3, 1, outputHeight); THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth); } } else { int nBatch = THCTensor_(size)(state, input, 0); int nChannels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } } void THNN_(SpatialUpSamplingNearest_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int scale_factor) { THCTensor_(zero)(state, output); THCUNN_assertSameGPU(state, 2, input, output); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, NULL, scale_factor); int inputHeight = THCTensor_(size)(state, input, input->nDimension-2); int inputWidth = THCTensor_(size)(state, input, input->nDimension-1); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (input->nDimension == 3) { THCTensor_(resize3d)(state, output, THCTensor_(size)(state, input, 0), outputHeight, outputWidth); } else { THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); } input = THCTensor_(newContiguous)(state, input); // This is for allocating output Tensor long no_elements = 1; for(int i = 0; i < input->nDimension; i++){ no_elements *= input->size[i]; } no_elements *= scale_factor * scale_factor; int d1; int d2; int d3; if (input->nDimension == 3) { d1 = output->size[0]; d2 = output->size[1]; d3 = output->size[2]; } else { d1 = output->size[1]; d2 = output->size[2]; d3 = output->size[3]; } real *input_data = THCTensor_(data)(state, input); real *output_data = THCTensor_(data)(state, output); // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! 
aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: upscale<<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(cudaGetLastError()); // final cut: THCTensor_(free)(state, input); } void THNN_(SpatialUpSamplingNearest_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int scale_factor) { THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, gradOutput, scale_factor); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); real *gradInput_data = THCTensor_(data)(state, gradInput); real *gradOutput_data = THCTensor_(data)(state, gradOutput); long no_elements = 1; for(int i = 0; i < gradInput->nDimension; i++){ no_elements *= gradInput->size[i]; } int d1; int d2; int d3; if (gradInput->nDimension == 3) { d1 = gradInput->size[0]; d2 = gradInput->size[1]; d3 = gradInput->size[2]; } else { d1 = gradInput->size[1]; d2 = gradInput->size[2]; d3 = gradInput->size[3]; } // cuda blocks & threads: long nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this long n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); long n_yblocks = (long)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: downscale<real ,accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(cudaGetLastError()); } #endif
e68d36ec884ea2e2712fea5b2a25db3fb0d5892e.hip
// !!! This is a file automatically generated by hipify!!!
/*******************************************************
 * Copyright (c) 2014, ArrayFire
 * All rights reserved.
 *
 * This file is distributed under 3-clause BSD license.
 * The complete license agreement can be obtained at:
 * http://arrayfire.com/licenses/BSD-3-Clause
 ********************************************************/

#include <blas.hpp>

#include <arith.hpp>
#include <cast.hpp>
#include <common/err_common.hpp>
#include <common/half.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cublas.hpp>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <err_cuda.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <reduce.hpp>
#include <tile.hpp>
#include <transpose.hpp>
#include <types.hpp>

#include <cassert>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

using common::half;
using common::kernel_type;
using std::is_same;
using std::vector;

namespace cuda {

hipblasOperation_t toCblasTranspose(af_mat_prop opt) {
    hipblasOperation_t out = HIPBLAS_OP_N;
    switch (opt) {
        case AF_MAT_NONE: out = HIPBLAS_OP_N; break;
        case AF_MAT_TRANS: out = HIPBLAS_OP_T; break;
        case AF_MAT_CTRANS: out = HIPBLAS_OP_C; break;
        default: AF_ERROR("INVALID af_mat_prop", AF_ERR_ARG);
    }
    return out;
}

template<typename T>
using gemm_func_def = std::function<hipblasStatus_t(
    hipblasHandle_t, hipblasOperation_t, hipblasOperation_t, int, int, int,
    const T *, const T *, int, const T *, int, const T *, T *, int)>;

template<typename T>
using gemmBatched_func_def = std::function<hipblasStatus_t(
    hipblasHandle_t, hipblasOperation_t, hipblasOperation_t, int, int, int,
    const T *, const T **, int, const T **, int, const T *, T **, int, int)>;

template<typename T>
using trsm_func_def = std::function<hipblasStatus_t(
    hipblasHandle_t, hipblasSideMode_t, hipblasFillMode_t, hipblasOperation_t,
    hipblasDiagType_t, int, int, const T *, const T *, int, T *, int)>;

#define BLAS_FUNC_DEF(FUNC) \
    template<typename T>    \
    FUNC##_func_def<T> FUNC##_func();

#define BLAS_FUNC(FUNC, TYPE, PREFIX)           \
    template<>                                  \
    FUNC##_func_def<TYPE> FUNC##_func<TYPE>() { \
        return &cublas##PREFIX##FUNC;           \
    }

BLAS_FUNC_DEF(gemm)
BLAS_FUNC(gemm, float, S)
BLAS_FUNC(gemm, cfloat, C)
BLAS_FUNC(gemm, double, D)
BLAS_FUNC(gemm, cdouble, Z)
BLAS_FUNC(gemm, __half, H)

BLAS_FUNC_DEF(gemmBatched)
BLAS_FUNC(gemmBatched, float, S)
BLAS_FUNC(gemmBatched, cfloat, C)
BLAS_FUNC(gemmBatched, double, D)
BLAS_FUNC(gemmBatched, cdouble, Z)
BLAS_FUNC(gemmBatched, __half, H)

BLAS_FUNC_DEF(trsm)
BLAS_FUNC(trsm, float, S)
BLAS_FUNC(trsm, cfloat, C)
BLAS_FUNC(trsm, double, D)
BLAS_FUNC(trsm, cdouble, Z)

#undef BLAS_FUNC
#undef BLAS_FUNC_DEF

template<typename T, bool conjugate>
struct dot_func_def_t {
    typedef hipblasStatus_t (*dot_func_def)(hipblasHandle_t, int, const T *,
                                            int, const T *, int, T *);
};

#define BLAS_FUNC_DEF(FUNC)              \
    template<typename T, bool conjugate> \
    typename FUNC##_func_def_t<T, conjugate>::FUNC##_func_def FUNC##_func();

#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX)                       \
    template<>                                                         \
    typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def       \
    FUNC##_func<TYPE, CONJUGATE>() {                                   \
        return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
               cublas##PREFIX##FUNC;                                   \
    }

BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, float, true, S)
BLAS_FUNC(dot, double, true, D)
BLAS_FUNC(dot, float, false, S)
BLAS_FUNC(dot, double, false, D)

#undef BLAS_FUNC

#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX, SUFFIX)               \
    template<>                                                         \
    typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def       \
    FUNC##_func<TYPE, CONJUGATE>() {                                   \
        return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
               cublas##PREFIX##FUNC##SUFFIX;                           \
    }

BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, cfloat, true, C, c)
BLAS_FUNC(dot, cdouble, true, Z, c)
BLAS_FUNC(dot, cfloat, false, C, u)
BLAS_FUNC(dot, cdouble, false, Z, u)

#undef BLAS_FUNC
#undef BLAS_FUNC_DEF

template<typename T>
hipDataType getType();

template<>
hipDataType getType<float>() {
    return HIP_R_32F;
}

template<>
hipDataType getType<cfloat>() {
    return HIP_C_32F;
}

template<>
hipDataType getType<double>() {
    return HIP_R_64F;
}

template<>
hipDataType getType<cdouble>() {
    return HIP_C_64F;
}

template<>
hipDataType getType<half>() {
    return HIP_R_16F;
}

template<typename T>
hipDataType getComputeType() {
    return getType<T>();
}

template<>
hipDataType getComputeType<half>() {
    hipDataType algo = getType<half>();
    // There is probably a bug in nvidia cuda docs and/or drivers: According to
    // https://docs.nvidia.com/cuda/cublas/index.html#cublas-GemmEx computeType
    // could be 32F even if A/B inputs are 16F. But CudaCompute 6.1 GPUs (for
    // example GTX10X0) don't seem to be capable of computing at f32 when the
    // inputs are f16: results are inf if trying to do so and hipblasGemmEx even
    // returns OK. At the moment let's leave this commented out: the drawback is
    // just that the speed of f16 computation on these GPUs is very slow:
    //
    // auto dev = getDeviceProp(getActiveDeviceId());
    // if (dev.major == 6 && dev.minor == 1) { algo = HIP_R_32F; }

    return algo;
}

template<typename T>
hipblasGemmAlgo_t selectGEMMAlgorithm() {
    return HIPBLAS_GEMM_DEFAULT;
}

template<>
hipblasGemmAlgo_t selectGEMMAlgorithm<common::half>() {
    auto dev               = getDeviceProp(getActiveDeviceId());
    hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
    if (dev.major >= 7) { algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; }
    return algo;
}

template<>
hipblasGemmAlgo_t selectGEMMAlgorithm<__half>() {
    return selectGEMMAlgorithm<common::half>();
}

template<typename T>
hipblasStatus_t gemmDispatch(BlasHandle handle, hipblasOperation_t lOpts,
                             hipblasOperation_t rOpts, int M, int N, int K,
                             const T *alpha, const Array<T> &lhs, dim_t lStride,
                             const Array<T> &rhs, dim_t rStride, const T *beta,
                             Array<T> &out, dim_t oleading) {
    auto prop = getDeviceProp(getActiveDeviceId());
    if (prop.major > 3) {
        return hipblasGemmEx(
            blasHandle(), lOpts, rOpts, M, N, K, alpha, lhs.get(), getType<T>(),
            lStride, rhs.get(), getType<T>(), rStride, beta, out.get(),
            getType<T>(), out.strides()[1],
            getComputeType<T>(),  // Compute type
            // NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
            // for the cublasGemm*Ex functions, the performance of the
            // fp32 numbers seems to increase dramatically. Their numerical
            // accuracy is also different compared to regular gemm functions.
            // The HIPBLAS_GEMM_DEFAULT algorithm selection does not experience
            // this change. Does this imply that the TENSOR_OP function
            // performs the computation in fp16 even when the compute
            // type is HIP_R_32F?
            selectGEMMAlgorithm<T>());
    } else {
        using Nt = typename common::kernel_type<T>::native;
        return gemm_func<Nt>()(blasHandle(), lOpts, rOpts, M, N, K, (Nt *)alpha,
                               (Nt *)lhs.get(), lStride, (Nt *)rhs.get(),
                               rStride, (Nt *)beta, (Nt *)out.get(), oleading);
    }
}

template<typename T>
hipblasStatus_t gemmBatchedDispatch(BlasHandle handle, hipblasOperation_t lOpts,
                                    hipblasOperation_t rOpts, int M, int N,
                                    int K, const T *alpha, const T **lptrs,
                                    int lStrides, const T **rptrs, int rStrides,
                                    const T *beta, T **optrs, int oStrides,
                                    int batchSize) {
    auto prop = getDeviceProp(getActiveDeviceId());
    if (prop.major > 3) {
        return hipblasGemmBatchedEx(
            blasHandle(), lOpts, rOpts, M, N, K, alpha, (const void **)lptrs,
            getType<T>(), lStrides, (const void **)rptrs, getType<T>(),
            rStrides, beta, (void **)optrs, getType<T>(), oStrides, batchSize,
            getComputeType<T>(),  // compute type
            // NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
            // for the cublasGemm*Ex functions, the performance of the
            // fp32 numbers seems to increase dramatically. Their numerical
            // accuracy is also different compared to regular gemm functions.
            // The HIPBLAS_GEMM_DEFAULT algorithm selection does not experience
            // this change. Does this imply that the TENSOR_OP function
            // performs the computation in fp16 even when the compute
            // type is HIP_R_32F?
            selectGEMMAlgorithm<T>());
    } else {
        using Nt = typename common::kernel_type<T>::native;
        return gemmBatched_func<Nt>()(
            blasHandle(), lOpts, rOpts, M, N, K, (const Nt *)alpha,
            (const Nt **)lptrs, lStrides, (const Nt **)rptrs, rStrides,
            (const Nt *)beta, (Nt **)optrs, oStrides, batchSize);
    }
}

template<typename T>
void gemm(Array<T> &out, af_mat_prop optLhs, af_mat_prop optRhs, const T *alpha,
          const Array<T> &lhs, const Array<T> &rhs, const T *beta) {
    const hipblasOperation_t lOpts = toCblasTranspose(optLhs);
    const hipblasOperation_t rOpts = toCblasTranspose(optRhs);

    const int aRowDim = (lOpts == HIPBLAS_OP_N) ? 0 : 1;
    const int aColDim = (lOpts == HIPBLAS_OP_N) ? 1 : 0;
    const int bColDim = (rOpts == HIPBLAS_OP_N) ? 1 : 0;

    const dim4 lDims = lhs.dims();
    const dim4 rDims = rhs.dims();
    const int M      = lDims[aRowDim];
    const int N      = rDims[bColDim];
    const int K      = lDims[aColDim];
    const dim4 oDims = out.dims();

    dim4 lStrides = lhs.strides();
    dim4 rStrides = rhs.strides();
    dim4 oStrides = out.strides();

    if (oDims.ndims() <= 2) {
        CUBLAS_CHECK(gemmDispatch<T>(blasHandle(), lOpts, rOpts, M, N, K, alpha,
                                     lhs, lStrides[1], rhs, rStrides[1], beta,
                                     out, oStrides[1]));
    } else {
        int batchSize = oDims[2] * oDims[3];

        vector<const T *> lptrs(batchSize);
        vector<const T *> rptrs(batchSize);
        vector<T *> optrs(batchSize);

        bool is_l_d2_batched = oDims[2] == lDims[2];
        bool is_l_d3_batched = oDims[3] == lDims[3];
        bool is_r_d2_batched = oDims[2] == rDims[2];
        bool is_r_d3_batched = oDims[3] == rDims[3];

        const T *lptr = lhs.get();
        const T *rptr = rhs.get();
        T *optr       = out.get();

        for (int n = 0; n < batchSize; n++) {
            int w    = n / oDims[2];
            int z    = n - w * oDims[2];
            int loff = z * (is_l_d2_batched * lStrides[2]) +
                       w * (is_l_d3_batched * lStrides[3]);
            int roff = z * (is_r_d2_batched * rStrides[2]) +
                       w * (is_r_d3_batched * rStrides[3]);
            lptrs[n] = lptr + loff;
            rptrs[n] = rptr + roff;
            optrs[n] = optr + z * oStrides[2] + w * oStrides[3];
        }

        size_t bytes = batchSize * sizeof(T **);
        auto d_lptrs = memAlloc<uchar>(bytes);
        auto d_rptrs = memAlloc<uchar>(bytes);
        auto d_optrs = memAlloc<uchar>(bytes);
        CUDA_CHECK(hipMemcpyAsync(d_lptrs.get(), lptrs.data(), bytes,
                                  hipMemcpyHostToDevice, getActiveStream()));
        CUDA_CHECK(hipMemcpyAsync(d_rptrs.get(), rptrs.data(), bytes,
                                  hipMemcpyHostToDevice, getActiveStream()));
        CUDA_CHECK(hipMemcpyAsync(d_optrs.get(), optrs.data(), bytes,
                                  hipMemcpyHostToDevice, getActiveStream()));

        // Call this before the gemm call so that you don't have to wait for
        // the computation. Even though it would make more sense to put it
        // afterwards
        CUDA_CHECK(hipStreamSynchronize(getActiveStream()));

        using Nt = typename common::kernel_type<T>::native;
        CUBLAS_CHECK(gemmBatchedDispatch(
            blasHandle(), lOpts, rOpts, M, N, K, alpha,
            (const T **)d_lptrs.get(), lStrides[1], (const T **)d_rptrs.get(),
            rStrides[1], beta, (T **)d_optrs.get(), oStrides[1], batchSize));
    }
}

template<typename T>
Array<T> dot(const Array<T> &lhs, const Array<T> &rhs, af_mat_prop optLhs,
             af_mat_prop optRhs) {
    auto lhs_ = (optLhs == AF_MAT_NONE ? lhs : conj<T>(lhs));
    auto rhs_ = (optRhs == AF_MAT_NONE ? rhs : conj<T>(rhs));
    auto temp = arithOp<T, af_mul_t>(lhs_, rhs_, lhs_.dims());
    return reduce<af_add_t, T, T>(temp, 0, false, 0);
}

template<typename T>
void trsm(const Array<T> &lhs, Array<T> &rhs, af_mat_prop trans, bool is_upper,
          bool is_left, bool is_unit) {
    // dim4 lDims = lhs.dims();
    dim4 rDims = rhs.dims();

    int M = rDims[0];
    int N = rDims[1];

    T alpha = scalar<T>(1);

    dim4 lStrides = lhs.strides();
    dim4 rStrides = rhs.strides();

    CUBLAS_CHECK(trsm_func<T>()(
        blasHandle(), is_left ? HIPBLAS_SIDE_LEFT : HIPBLAS_SIDE_RIGHT,
        is_upper ? HIPBLAS_FILL_MODE_UPPER : HIPBLAS_FILL_MODE_LOWER,
        toCblasTranspose(trans),
        is_unit ? HIPBLAS_DIAG_UNIT : HIPBLAS_DIAG_NON_UNIT, M, N, &alpha,
        lhs.get(), lStrides[1], rhs.get(), rStrides[1]));
}

#define INSTANTIATE_GEMM(TYPE)                                               \
    template void gemm<TYPE>(Array<TYPE> & out, af_mat_prop optLhs,          \
                             af_mat_prop optRhs, const TYPE *alpha,          \
                             const Array<TYPE> &lhs, const Array<TYPE> &rhs, \
                             const TYPE *beta);

INSTANTIATE_GEMM(float)
INSTANTIATE_GEMM(cfloat)
INSTANTIATE_GEMM(double)
INSTANTIATE_GEMM(cdouble)
INSTANTIATE_GEMM(half)

#define INSTANTIATE_DOT(TYPE)                                                  \
    template Array<TYPE> dot<TYPE>(const Array<TYPE> &lhs,                     \
                                   const Array<TYPE> &rhs, af_mat_prop optLhs, \
                                   af_mat_prop optRhs);

INSTANTIATE_DOT(float)
INSTANTIATE_DOT(double)
INSTANTIATE_DOT(cfloat)
INSTANTIATE_DOT(cdouble)
INSTANTIATE_DOT(half)

#define INSTANTIATE_TRSM(TYPE)                                               \
    template void trsm<TYPE>(const Array<TYPE> &lhs, Array<TYPE> &rhs,       \
                             af_mat_prop trans, bool is_upper, bool is_left, \
                             bool is_unit);

INSTANTIATE_TRSM(float)
INSTANTIATE_TRSM(cfloat)
INSTANTIATE_TRSM(double)
INSTANTIATE_TRSM(cdouble)

}  // namespace cuda
e68d36ec884ea2e2712fea5b2a25db3fb0d5892e.cu
/*******************************************************
 * Copyright (c) 2014, ArrayFire
 * All rights reserved.
 *
 * This file is distributed under 3-clause BSD license.
 * The complete license agreement can be obtained at:
 * http://arrayfire.com/licenses/BSD-3-Clause
 ********************************************************/

#include <blas.hpp>

#include <arith.hpp>
#include <cast.hpp>
#include <common/err_common.hpp>
#include <common/half.hpp>
#include <complex.hpp>
#include <copy.hpp>
#include <cublas.hpp>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <err_cuda.hpp>
#include <math.hpp>
#include <platform.hpp>
#include <reduce.hpp>
#include <tile.hpp>
#include <transpose.hpp>
#include <types.hpp>

#include <cassert>
#include <functional>
#include <stdexcept>
#include <string>
#include <vector>

using common::half;
using common::kernel_type;
using std::is_same;
using std::vector;

namespace cuda {

cublasOperation_t toCblasTranspose(af_mat_prop opt) {
    cublasOperation_t out = CUBLAS_OP_N;
    switch (opt) {
        case AF_MAT_NONE: out = CUBLAS_OP_N; break;
        case AF_MAT_TRANS: out = CUBLAS_OP_T; break;
        case AF_MAT_CTRANS: out = CUBLAS_OP_C; break;
        default: AF_ERROR("INVALID af_mat_prop", AF_ERR_ARG);
    }
    return out;
}

template<typename T>
using gemm_func_def = std::function<cublasStatus_t(
    cublasHandle_t, cublasOperation_t, cublasOperation_t, int, int, int,
    const T *, const T *, int, const T *, int, const T *, T *, int)>;

template<typename T>
using gemmBatched_func_def = std::function<cublasStatus_t(
    cublasHandle_t, cublasOperation_t, cublasOperation_t, int, int, int,
    const T *, const T **, int, const T **, int, const T *, T **, int, int)>;

template<typename T>
using trsm_func_def = std::function<cublasStatus_t(
    cublasHandle_t, cublasSideMode_t, cublasFillMode_t, cublasOperation_t,
    cublasDiagType_t, int, int, const T *, const T *, int, T *, int)>;

#define BLAS_FUNC_DEF(FUNC) \
    template<typename T>    \
    FUNC##_func_def<T> FUNC##_func();

#define BLAS_FUNC(FUNC, TYPE, PREFIX)           \
    template<>                                  \
    FUNC##_func_def<TYPE> FUNC##_func<TYPE>() { \
        return &cublas##PREFIX##FUNC;           \
    }

BLAS_FUNC_DEF(gemm)
BLAS_FUNC(gemm, float, S)
BLAS_FUNC(gemm, cfloat, C)
BLAS_FUNC(gemm, double, D)
BLAS_FUNC(gemm, cdouble, Z)
BLAS_FUNC(gemm, __half, H)

BLAS_FUNC_DEF(gemmBatched)
BLAS_FUNC(gemmBatched, float, S)
BLAS_FUNC(gemmBatched, cfloat, C)
BLAS_FUNC(gemmBatched, double, D)
BLAS_FUNC(gemmBatched, cdouble, Z)
BLAS_FUNC(gemmBatched, __half, H)

BLAS_FUNC_DEF(trsm)
BLAS_FUNC(trsm, float, S)
BLAS_FUNC(trsm, cfloat, C)
BLAS_FUNC(trsm, double, D)
BLAS_FUNC(trsm, cdouble, Z)

#undef BLAS_FUNC
#undef BLAS_FUNC_DEF

template<typename T, bool conjugate>
struct dot_func_def_t {
    typedef cublasStatus_t (*dot_func_def)(cublasHandle_t, int, const T *, int,
                                           const T *, int, T *);
};

#define BLAS_FUNC_DEF(FUNC)              \
    template<typename T, bool conjugate> \
    typename FUNC##_func_def_t<T, conjugate>::FUNC##_func_def FUNC##_func();

#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX)                       \
    template<>                                                         \
    typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def       \
    FUNC##_func<TYPE, CONJUGATE>() {                                   \
        return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
               cublas##PREFIX##FUNC;                                   \
    }

BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, float, true, S)
BLAS_FUNC(dot, double, true, D)
BLAS_FUNC(dot, float, false, S)
BLAS_FUNC(dot, double, false, D)

#undef BLAS_FUNC

#define BLAS_FUNC(FUNC, TYPE, CONJUGATE, PREFIX, SUFFIX)               \
    template<>                                                         \
    typename FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def       \
    FUNC##_func<TYPE, CONJUGATE>() {                                   \
        return (FUNC##_func_def_t<TYPE, CONJUGATE>::FUNC##_func_def) & \
               cublas##PREFIX##FUNC##SUFFIX;                           \
    }

BLAS_FUNC_DEF(dot)
BLAS_FUNC(dot, cfloat, true, C, c)
BLAS_FUNC(dot, cdouble, true, Z, c)
BLAS_FUNC(dot, cfloat, false, C, u)
BLAS_FUNC(dot, cdouble, false, Z, u)

#undef BLAS_FUNC
#undef BLAS_FUNC_DEF

template<typename T>
cudaDataType_t getType();

template<>
cudaDataType_t getType<float>() {
    return CUDA_R_32F;
}

template<>
cudaDataType_t getType<cfloat>() {
    return CUDA_C_32F;
}

template<>
cudaDataType_t getType<double>() {
    return CUDA_R_64F;
}

template<>
cudaDataType_t getType<cdouble>() {
    return CUDA_C_64F;
}

template<>
cudaDataType_t getType<half>() {
    return CUDA_R_16F;
}

template<typename T>
cudaDataType_t getComputeType() {
    return getType<T>();
}

template<>
cudaDataType_t getComputeType<half>() {
    cudaDataType_t algo = getType<half>();
    // There is probably a bug in nvidia cuda docs and/or drivers: According to
    // https://docs.nvidia.com/cuda/cublas/index.html#cublas-GemmEx computeType
    // could be 32F even if A/B inputs are 16F. But CudaCompute 6.1 GPUs (for
    // example GTX10X0) don't seem to be capable of computing at f32 when the
    // inputs are f16: results are inf if trying to do so and cublasGemmEx even
    // returns OK. At the moment let's leave this commented out: the drawback is
    // just that the speed of f16 computation on these GPUs is very slow:
    //
    // auto dev = getDeviceProp(getActiveDeviceId());
    // if (dev.major == 6 && dev.minor == 1) { algo = CUDA_R_32F; }

    return algo;
}

template<typename T>
cublasGemmAlgo_t selectGEMMAlgorithm() {
    return CUBLAS_GEMM_DEFAULT;
}

template<>
cublasGemmAlgo_t selectGEMMAlgorithm<common::half>() {
    auto dev              = getDeviceProp(getActiveDeviceId());
    cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT;
    if (dev.major >= 7) { algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; }
    return algo;
}

template<>
cublasGemmAlgo_t selectGEMMAlgorithm<__half>() {
    return selectGEMMAlgorithm<common::half>();
}

template<typename T>
cublasStatus_t gemmDispatch(BlasHandle handle, cublasOperation_t lOpts,
                            cublasOperation_t rOpts, int M, int N, int K,
                            const T *alpha, const Array<T> &lhs, dim_t lStride,
                            const Array<T> &rhs, dim_t rStride, const T *beta,
                            Array<T> &out, dim_t oleading) {
    auto prop = getDeviceProp(getActiveDeviceId());
    if (prop.major > 3) {
        return cublasGemmEx(
            blasHandle(), lOpts, rOpts, M, N, K, alpha, lhs.get(), getType<T>(),
            lStride, rhs.get(), getType<T>(), rStride, beta, out.get(),
            getType<T>(), out.strides()[1],
            getComputeType<T>(),  // Compute type
            // NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
            // for the cublasGemm*Ex functions, the performance of the
            // fp32 numbers seems to increase dramatically. Their numerical
            // accuracy is also different compared to regular gemm functions.
            // The CUBLAS_GEMM_DEFAULT algorithm selection does not experience
            // this change. Does this imply that the TENSOR_OP function
            // performs the computation in fp16 even when the compute
            // type is CUDA_R_32F?
            selectGEMMAlgorithm<T>());
    } else {
        using Nt = typename common::kernel_type<T>::native;
        return gemm_func<Nt>()(blasHandle(), lOpts, rOpts, M, N, K, (Nt *)alpha,
                               (Nt *)lhs.get(), lStride, (Nt *)rhs.get(),
                               rStride, (Nt *)beta, (Nt *)out.get(), oleading);
    }
}

template<typename T>
cublasStatus_t gemmBatchedDispatch(BlasHandle handle, cublasOperation_t lOpts,
                                   cublasOperation_t rOpts, int M, int N, int K,
                                   const T *alpha, const T **lptrs,
                                   int lStrides, const T **rptrs, int rStrides,
                                   const T *beta, T **optrs, int oStrides,
                                   int batchSize) {
    auto prop = getDeviceProp(getActiveDeviceId());
    if (prop.major > 3) {
        return cublasGemmBatchedEx(
            blasHandle(), lOpts, rOpts, M, N, K, alpha, (const void **)lptrs,
            getType<T>(), lStrides, (const void **)rptrs, getType<T>(),
            rStrides, beta, (void **)optrs, getType<T>(), oStrides, batchSize,
            getComputeType<T>(),  // compute type
            // NOTE: When using the CUBLAS_GEMM_DEFAULT_TENSOR_OP algorithm
            // for the cublasGemm*Ex functions, the performance of the
            // fp32 numbers seems to increase dramatically. Their numerical
            // accuracy is also different compared to regular gemm functions.
            // The CUBLAS_GEMM_DEFAULT algorithm selection does not experience
            // this change. Does this imply that the TENSOR_OP function
            // performs the computation in fp16 even when the compute
            // type is CUDA_R_32F?
            selectGEMMAlgorithm<T>());
    } else {
        using Nt = typename common::kernel_type<T>::native;
        return gemmBatched_func<Nt>()(
            blasHandle(), lOpts, rOpts, M, N, K, (const Nt *)alpha,
            (const Nt **)lptrs, lStrides, (const Nt **)rptrs, rStrides,
            (const Nt *)beta, (Nt **)optrs, oStrides, batchSize);
    }
}

template<typename T>
void gemm(Array<T> &out, af_mat_prop optLhs, af_mat_prop optRhs, const T *alpha,
          const Array<T> &lhs, const Array<T> &rhs, const T *beta) {
    const cublasOperation_t lOpts = toCblasTranspose(optLhs);
    const cublasOperation_t rOpts = toCblasTranspose(optRhs);

    const int aRowDim = (lOpts == CUBLAS_OP_N) ? 0 : 1;
    const int aColDim = (lOpts == CUBLAS_OP_N) ? 1 : 0;
    const int bColDim = (rOpts == CUBLAS_OP_N) ? 1 : 0;

    const dim4 lDims = lhs.dims();
    const dim4 rDims = rhs.dims();
    const int M      = lDims[aRowDim];
    const int N      = rDims[bColDim];
    const int K      = lDims[aColDim];
    const dim4 oDims = out.dims();

    dim4 lStrides = lhs.strides();
    dim4 rStrides = rhs.strides();
    dim4 oStrides = out.strides();

    if (oDims.ndims() <= 2) {
        CUBLAS_CHECK(gemmDispatch<T>(blasHandle(), lOpts, rOpts, M, N, K, alpha,
                                     lhs, lStrides[1], rhs, rStrides[1], beta,
                                     out, oStrides[1]));
    } else {
        int batchSize = oDims[2] * oDims[3];

        vector<const T *> lptrs(batchSize);
        vector<const T *> rptrs(batchSize);
        vector<T *> optrs(batchSize);

        bool is_l_d2_batched = oDims[2] == lDims[2];
        bool is_l_d3_batched = oDims[3] == lDims[3];
        bool is_r_d2_batched = oDims[2] == rDims[2];
        bool is_r_d3_batched = oDims[3] == rDims[3];

        const T *lptr = lhs.get();
        const T *rptr = rhs.get();
        T *optr       = out.get();

        for (int n = 0; n < batchSize; n++) {
            int w    = n / oDims[2];
            int z    = n - w * oDims[2];
            int loff = z * (is_l_d2_batched * lStrides[2]) +
                       w * (is_l_d3_batched * lStrides[3]);
            int roff = z * (is_r_d2_batched * rStrides[2]) +
                       w * (is_r_d3_batched * rStrides[3]);
            lptrs[n] = lptr + loff;
            rptrs[n] = rptr + roff;
            optrs[n] = optr + z * oStrides[2] + w * oStrides[3];
        }

        size_t bytes = batchSize * sizeof(T **);
        auto d_lptrs = memAlloc<uchar>(bytes);
        auto d_rptrs = memAlloc<uchar>(bytes);
        auto d_optrs = memAlloc<uchar>(bytes);
        CUDA_CHECK(cudaMemcpyAsync(d_lptrs.get(), lptrs.data(), bytes,
                                   cudaMemcpyHostToDevice, getActiveStream()));
        CUDA_CHECK(cudaMemcpyAsync(d_rptrs.get(), rptrs.data(), bytes,
                                   cudaMemcpyHostToDevice, getActiveStream()));
        CUDA_CHECK(cudaMemcpyAsync(d_optrs.get(), optrs.data(), bytes,
                                   cudaMemcpyHostToDevice, getActiveStream()));

        // Call this before the gemm call so that you don't have to wait for
        // the computation. Even though it would make more sense to put it
        // afterwards
        CUDA_CHECK(cudaStreamSynchronize(getActiveStream()));

        using Nt = typename common::kernel_type<T>::native;
        CUBLAS_CHECK(gemmBatchedDispatch(
            blasHandle(), lOpts, rOpts, M, N, K, alpha,
            (const T **)d_lptrs.get(), lStrides[1], (const T **)d_rptrs.get(),
            rStrides[1], beta, (T **)d_optrs.get(), oStrides[1], batchSize));
    }
}

template<typename T>
Array<T> dot(const Array<T> &lhs, const Array<T> &rhs, af_mat_prop optLhs,
             af_mat_prop optRhs) {
    auto lhs_ = (optLhs == AF_MAT_NONE ? lhs : conj<T>(lhs));
    auto rhs_ = (optRhs == AF_MAT_NONE ? rhs : conj<T>(rhs));
    auto temp = arithOp<T, af_mul_t>(lhs_, rhs_, lhs_.dims());
    return reduce<af_add_t, T, T>(temp, 0, false, 0);
}

template<typename T>
void trsm(const Array<T> &lhs, Array<T> &rhs, af_mat_prop trans, bool is_upper,
          bool is_left, bool is_unit) {
    // dim4 lDims = lhs.dims();
    dim4 rDims = rhs.dims();

    int M = rDims[0];
    int N = rDims[1];

    T alpha = scalar<T>(1);

    dim4 lStrides = lhs.strides();
    dim4 rStrides = rhs.strides();

    CUBLAS_CHECK(trsm_func<T>()(
        blasHandle(), is_left ? CUBLAS_SIDE_LEFT : CUBLAS_SIDE_RIGHT,
        is_upper ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER,
        toCblasTranspose(trans),
        is_unit ? CUBLAS_DIAG_UNIT : CUBLAS_DIAG_NON_UNIT, M, N, &alpha,
        lhs.get(), lStrides[1], rhs.get(), rStrides[1]));
}

#define INSTANTIATE_GEMM(TYPE)                                               \
    template void gemm<TYPE>(Array<TYPE> & out, af_mat_prop optLhs,          \
                             af_mat_prop optRhs, const TYPE *alpha,          \
                             const Array<TYPE> &lhs, const Array<TYPE> &rhs, \
                             const TYPE *beta);

INSTANTIATE_GEMM(float)
INSTANTIATE_GEMM(cfloat)
INSTANTIATE_GEMM(double)
INSTANTIATE_GEMM(cdouble)
INSTANTIATE_GEMM(half)

#define INSTANTIATE_DOT(TYPE)                                                  \
    template Array<TYPE> dot<TYPE>(const Array<TYPE> &lhs,                     \
                                   const Array<TYPE> &rhs, af_mat_prop optLhs, \
                                   af_mat_prop optRhs);

INSTANTIATE_DOT(float)
INSTANTIATE_DOT(double)
INSTANTIATE_DOT(cfloat)
INSTANTIATE_DOT(cdouble)
INSTANTIATE_DOT(half)

#define INSTANTIATE_TRSM(TYPE)                                               \
    template void trsm<TYPE>(const Array<TYPE> &lhs, Array<TYPE> &rhs,       \
                             af_mat_prop trans, bool is_upper, bool is_left, \
                             bool is_unit);

INSTANTIATE_TRSM(float)
INSTANTIATE_TRSM(cfloat)
INSTANTIATE_TRSM(double)
INSTANTIATE_TRSM(cdouble)

}  // namespace cuda
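
The pair above wraps cublasGemmEx / hipblasGemmEx behind ArrayFire-internal helpers (blasHandle, getType, selectGEMMAlgorithm). For reference, below is a minimal standalone sketch of the underlying cublasGemmEx call pattern that gemmDispatch builds; it is not part of the dataset, and the 2x2 all-ones matrices and the lack of error checking on the CUDA runtime calls are illustrative simplifications.

// Hypothetical standalone sketch (not from the dataset): single-precision
// C = alpha*A*B + beta*C via cublasGemmEx with explicit data and compute
// types, mirroring the pattern used by gemmDispatch above.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <stdio.h>

int main(void) {
    const int M = 2, N = 2, K = 2;
    const float alpha = 1.0f, beta = 0.0f;
    float hA[4] = {1, 1, 1, 1};  // A is MxK, column-major
    float hB[4] = {1, 1, 1, 1};  // B is KxN, column-major
    float hC[4] = {0};           // C is MxN

    float *dA, *dB, *dC;
    cudaMalloc((void **)&dA, sizeof(hA));
    cudaMalloc((void **)&dB, sizeof(hB));
    cudaMalloc((void **)&dC, sizeof(hC));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
    cudaMemcpy(dC, hC, sizeof(hC), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);

    // Leading dimensions equal the row counts because the matrices are dense
    // and column-major; these correspond to the lStride/rStride arguments of
    // gemmDispatch above.
    cublasStatus_t err = cublasGemmEx(
        handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, dA, CUDA_R_32F, M,
        dB, CUDA_R_32F, K, &beta, dC, CUDA_R_32F, M, CUDA_R_32F,
        CUBLAS_GEMM_DEFAULT);

    cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
    printf("status=%d C[0]=%f\n", (int)err, hC[0]);  // expect C[0] == 2

    cublasDestroy(handle);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
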
854a3129c3b3929284afc87748e03121c7a74644.hip
// !!! This is a file automatically generated by hipify!!!
// CUDA programming
// Exercise n. 10

#include <errno.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>  // malloc, free, EXIT_SUCCESS

#define N_ELEMS 16
#define THREADS 4

// Prototypes
__global__ void dot_prod(int *a, int *b, int *c);
__host__ void ints(int *m, int N);
__host__ void print_array(int *a, int N);

int main(void)
{
    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    int size = N_ELEMS * sizeof(int);

    // Allocate space for host copies of a, b, c
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(sizeof(int));

    // Setup input values
    ints(a, N_ELEMS);
    ints(b, N_ELEMS);

    // Allocate space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, sizeof(int));

    // Copy inputs to device
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    // Zero the accumulator before the kernel's atomicAdd
    hipMemset(d_c, 0, sizeof(int));

    // Call the kernel on GPU
    hipLaunchKernelGGL(dot_prod, dim3(N_ELEMS / THREADS), dim3(THREADS), 0, 0,
                       d_a, d_b, d_c);

    // Copy result back to host
    hipMemcpy(c, d_c, sizeof(int), hipMemcpyDeviceToHost);

    // Check the result
    print_array(a, N_ELEMS);
    print_array(b, N_ELEMS);
    printf("%d\n", *c);

    // Cleanup
    free(a);
    free(b);
    free(c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return(EXIT_SUCCESS);
}

// Dot product (on device)
__global__ void dot_prod(int *a, int *b, int *c)
{
    __shared__ int tmp[THREADS];
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    tmp[threadIdx.x] = a[index] * b[index];
    __syncthreads();

    if(0 == threadIdx.x) {
        int sum = 0;
        for(int i = 0; i < THREADS; i++) {
            sum += tmp[i];
        }
        atomicAdd(c, sum);  // atomic operation to avoid race condition
    }
}

// Initialisation
__host__ void ints(int *m, int N)
{
    int i;
    for(i = 0; i < N; i++)
        m[i] = 1;
}

// Print the elements of the array
__host__ void print_array(int *a, int N)
{
    for(int i = 0; i < N; i++) {
        printf("%d\t", a[i]);
    }
    printf("\n");
}
854a3129c3b3929284afc87748e03121c7a74644.cu
// CUDA programming
// Exercise n. 10

#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>  // malloc, free, EXIT_SUCCESS

#define N_ELEMS 16
#define THREADS 4

// Prototypes
__global__ void dot_prod(int *a, int *b, int *c);
__host__ void ints(int *m, int N);
__host__ void print_array(int *a, int N);

int main(void)
{
    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    int size = N_ELEMS * sizeof(int);

    // Allocate space for host copies of a, b, c
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(sizeof(int));

    // Setup input values
    ints(a, N_ELEMS);
    ints(b, N_ELEMS);

    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, sizeof(int));

    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Zero the accumulator before the kernel's atomicAdd
    cudaMemset(d_c, 0, sizeof(int));

    // Call the kernel on GPU
    dot_prod<<< N_ELEMS/THREADS, THREADS >>>(d_a, d_b, d_c);

    // Copy result back to host
    cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);

    // Check the result
    print_array(a, N_ELEMS);
    print_array(b, N_ELEMS);
    printf("%d\n", *c);

    // Cleanup
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return(EXIT_SUCCESS);
}

// Dot product (on device)
__global__ void dot_prod(int *a, int *b, int *c)
{
    __shared__ int tmp[THREADS];
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    tmp[threadIdx.x] = a[index] * b[index];
    __syncthreads();

    if(0 == threadIdx.x) {
        int sum = 0;
        for(int i = 0; i < THREADS; i++) {
            sum += tmp[i];
        }
        atomicAdd(c, sum);  // atomic operation to avoid race condition
    }
}

// Initialisation
__host__ void ints(int *m, int N)
{
    int i;
    for(i = 0; i < N; i++)
        m[i] = 1;
}

// Print the elements of the array
__host__ void print_array(int *a, int N)
{
    for(int i = 0; i < N; i++) {
        printf("%d\t", a[i]);
    }
    printf("\n");
}
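
In the dot_prod kernel above, thread 0 sums the per-block partial products serially before a single atomicAdd. A common alternative is a shared-memory tree reduction; the sketch below is not part of the dataset, the kernel name dot_prod_tree is hypothetical, and it assumes the N_ELEMS/THREADS definitions of the exercise above with THREADS a power of two.

// Hypothetical variant (not from the dataset): shared-memory tree reduction
// instead of the serial loop in thread 0.
__global__ void dot_prod_tree(const int *a, const int *b, int *c)
{
    __shared__ int tmp[THREADS];
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    tmp[threadIdx.x] = a[index] * b[index];
    __syncthreads();

    // Halve the number of active threads each step; each active thread folds
    // its partner's partial sum into its own slot.
    for (int stride = THREADS / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) {
            tmp[threadIdx.x] += tmp[threadIdx.x + stride];
        }
        __syncthreads();
    }

    // One atomic per block combines the block-level partial sums.
    if (threadIdx.x == 0) {
        atomicAdd(c, tmp[0]);
    }
}
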