Dataset columns (all string-valued; the two numbers are the minimum and maximum cell length observed in the dataset):

  Column           Type      Length (min - max)
  hip_filename     string    5 - 84
  hip_content      string    79 - 9.69M
  cuda_filename    string    4 - 83
  cuda_content     string    19 - 9.69M
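Each record pairs a HIP source file (hip_filename, hip_content) with its CUDA counterpart (cuda_filename, cuda_content). As a minimal sketch only, one record could be represented in C++ as the struct below; the struct name and the field comments are illustrative and not part of the dataset:

#include <string>

// Hypothetical in-memory view of one dataset record.
struct TranslationPair {
    std::string hip_filename;   // 5 - 84 characters
    std::string hip_content;    // full HIP source file, 79 characters up to 9.69M
    std::string cuda_filename;  // 4 - 83 characters
    std::string cuda_content;   // full CUDA source file, 19 characters up to 9.69M
};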
3a3879cdfe692cc76a3dbce8d219bc7ed3f9c0b5.hip
// !!! This is a file automatically generated by hipify!!!
#include "../include/cbf_generator.hpp"
#include "../include/hpc_helpers.hpp"
#include "../include/binary_IO.hpp"

typedef uint64_t index_t;
typedef uint8_t label_t;
typedef float value_t;

template <
    typename index_t,
    typename value_t>
value_t plain_dtw(
    value_t * query,
    value_t * subject,
    index_t num_features) {

    // for convenient indexing
    const index_t lane = num_features+1;

    // allocate the matrix of M
    value_t * penalty = new value_t[lane*lane];

    // initialize the matrix M
    for (index_t index = 1; index < lane-1; index++) {
        penalty[index] = INFINITY;
        penalty[index*lane] = INFINITY;
    }
    penalty[0] = 0;

    // traverse graph in row-major order
    for (index_t row = 1; row < lane; row++) {
        const value_t q_value = query[row-1];
        for (index_t col = 1; col < lane; col++) {

            // determine contribution from incoming edges
            const value_t diag = penalty[(row-1)*lane+col-1];
            const value_t abve = penalty[(row-1)*lane+col+0];
            const value_t left = penalty[(row+0)*lane+col-1];

            // compute residue between query and subject
            const value_t residue = q_value-subject[col-1];

            // relax node
            penalty[row*lane+col] = residue*residue +
                                    min(diag, min(abve, left));
        }
    }

    // report the lower right cell and free memory
    const value_t result = penalty[lane*lane-1];
    delete [] penalty;

    return result;
}

template <
    typename index_t,
    typename value_t>
value_t dtw(
    value_t * query,
    value_t * subject,
    index_t num_features) {

    const index_t lane = num_features+1;
    value_t * penalty = new value_t[2*lane];

    for (index_t index = 0; index < lane; index++)
        penalty[index+1] = INFINITY;
    penalty[0] = 0;

    for (index_t row = 1; row < lane; row++) {
        const value_t q_value = query[row-1];
        const index_t target_row = row & 1;
        const index_t source_row = !target_row;

        if (row == 2)
            penalty[target_row*lane] = INFINITY;

        for (index_t col = 1; col < lane; col++) {
            const value_t diag = penalty[source_row*lane+col-1];
            const value_t abve = penalty[source_row*lane+col+0];
            const value_t left = penalty[target_row*lane+col-1];

            const value_t residue = q_value-subject[col-1];

            penalty[target_row*lane+col] = residue*residue +
                                           min(diag, min(abve, left));
        }
    }

    const index_t last_row = num_features & 1;
    const value_t result = penalty[last_row*lane+num_features];
    delete [] penalty;

    return result;
}

#include <omp.h>

template <
    typename index_t,
    typename value_t>
void host_dtw(
    value_t * query,
    value_t * subject,
    value_t * dist,
    index_t num_entries,
    index_t num_features) {

    # pragma omp parallel for
    for (index_t entry = 0; entry < num_entries; entry++)
        dist[entry] = dtw(query, subject+entry*num_features, num_features);
}

int main () {

    constexpr index_t num_features = 128;
    constexpr index_t num_entries = 1UL << 20;

    // small letters for hosts, capital letters for device
    value_t * data = nullptr, * dist = nullptr;
    label_t * labels = nullptr;

    // malloc memory
    hipHostMalloc(&data, sizeof(value_t)*num_entries*num_features);    CUERR
    hipHostMalloc(&dist, sizeof(value_t)*num_entries);                 CUERR
    hipHostMalloc(&labels, sizeof(label_t)*num_entries);               CUERR

    // create CBF data set on host
    TIMERSTART(generate_data)
    generate_cbf(data, labels, num_entries, num_features);
    TIMERSTOP(generate_data)

    TIMERSTART(DTW_openmp)
    host_dtw(data, data, dist, num_entries, num_features);
    TIMERSTOP(DTW_openmp)

    for (index_t index = 0; index < 10; index++)
        std::cout << index_t(labels[index]) << " " << dist[index] << std::endl;

    // get rid of the memory
    hipHostFree(labels);
    hipHostFree(data);
    hipHostFree(dist);
}
3a3879cdfe692cc76a3dbce8d219bc7ed3f9c0b5.cu
#include "../include/cbf_generator.hpp"
#include "../include/hpc_helpers.hpp"
#include "../include/binary_IO.hpp"

typedef uint64_t index_t;
typedef uint8_t label_t;
typedef float value_t;

template <
    typename index_t,
    typename value_t>
value_t plain_dtw(
    value_t * query,
    value_t * subject,
    index_t num_features) {

    // for convenient indexing
    const index_t lane = num_features+1;

    // allocate the matrix of M
    value_t * penalty = new value_t[lane*lane];

    // initialize the matrix M
    for (index_t index = 1; index < lane-1; index++) {
        penalty[index] = INFINITY;
        penalty[index*lane] = INFINITY;
    }
    penalty[0] = 0;

    // traverse graph in row-major order
    for (index_t row = 1; row < lane; row++) {
        const value_t q_value = query[row-1];
        for (index_t col = 1; col < lane; col++) {

            // determine contribution from incoming edges
            const value_t diag = penalty[(row-1)*lane+col-1];
            const value_t abve = penalty[(row-1)*lane+col+0];
            const value_t left = penalty[(row+0)*lane+col-1];

            // compute residue between query and subject
            const value_t residue = q_value-subject[col-1];

            // relax node
            penalty[row*lane+col] = residue*residue +
                                    min(diag, min(abve, left));
        }
    }

    // report the lower right cell and free memory
    const value_t result = penalty[lane*lane-1];
    delete [] penalty;

    return result;
}

template <
    typename index_t,
    typename value_t>
value_t dtw(
    value_t * query,
    value_t * subject,
    index_t num_features) {

    const index_t lane = num_features+1;
    value_t * penalty = new value_t[2*lane];

    for (index_t index = 0; index < lane; index++)
        penalty[index+1] = INFINITY;
    penalty[0] = 0;

    for (index_t row = 1; row < lane; row++) {
        const value_t q_value = query[row-1];
        const index_t target_row = row & 1;
        const index_t source_row = !target_row;

        if (row == 2)
            penalty[target_row*lane] = INFINITY;

        for (index_t col = 1; col < lane; col++) {
            const value_t diag = penalty[source_row*lane+col-1];
            const value_t abve = penalty[source_row*lane+col+0];
            const value_t left = penalty[target_row*lane+col-1];

            const value_t residue = q_value-subject[col-1];

            penalty[target_row*lane+col] = residue*residue +
                                           min(diag, min(abve, left));
        }
    }

    const index_t last_row = num_features & 1;
    const value_t result = penalty[last_row*lane+num_features];
    delete [] penalty;

    return result;
}

#include <omp.h>

template <
    typename index_t,
    typename value_t>
void host_dtw(
    value_t * query,
    value_t * subject,
    value_t * dist,
    index_t num_entries,
    index_t num_features) {

    # pragma omp parallel for
    for (index_t entry = 0; entry < num_entries; entry++)
        dist[entry] = dtw(query, subject+entry*num_features, num_features);
}

int main () {

    constexpr index_t num_features = 128;
    constexpr index_t num_entries = 1UL << 20;

    // small letters for hosts, capital letters for device
    value_t * data = nullptr, * dist = nullptr;
    label_t * labels = nullptr;

    // malloc memory
    cudaMallocHost(&data, sizeof(value_t)*num_entries*num_features);   CUERR
    cudaMallocHost(&dist, sizeof(value_t)*num_entries);                CUERR
    cudaMallocHost(&labels, sizeof(label_t)*num_entries);              CUERR

    // create CBF data set on host
    TIMERSTART(generate_data)
    generate_cbf(data, labels, num_entries, num_features);
    TIMERSTOP(generate_data)

    TIMERSTART(DTW_openmp)
    host_dtw(data, data, dist, num_entries, num_features);
    TIMERSTOP(DTW_openmp)

    for (index_t index = 0; index < 10; index++)
        std::cout << index_t(labels[index]) << " " << dist[index] << std::endl;

    // get rid of the memory
    cudaFreeHost(labels);
    cudaFreeHost(data);
    cudaFreeHost(dist);
}
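Comparing the two cells of this record, the only differences hipify introduced are the generated header comment and the pinned host-memory calls: cudaMallocHost becomes hipHostMalloc and cudaFreeHost becomes hipHostFree. The standalone program below is a minimal sketch of just that mapping, assuming a working hipcc toolchain; it is illustrative and not part of the dataset:

#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    void *buf = nullptr;
    const size_t bytes = sizeof(float) * (1 << 20);

    // pinned (page-locked) host allocation: hipify's replacement for cudaMallocHost
    if (hipHostMalloc(&buf, bytes) != hipSuccess) {
        std::fprintf(stderr, "hipHostMalloc failed\n");
        return 1;
    }
    float *data = static_cast<float*>(buf);

    data[0] = 42.0f;                              // ordinary host access to pinned memory
    std::printf("first element: %f\n", data[0]);

    hipHostFree(data);                            // hipify's replacement for cudaFreeHost
    return 0;
}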
cab6d4114d28c18918168452a4f26a4bb10b2f69.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : GPUexplore.cu Author : Anton Wijs and Thomas Neele Version : Copyright : Copyright Anton Wijs and Thomas Neele Description : CUDA GPUexplore: On the fly state space analysis ============================================================================ */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <assert.h> #include <time.h> #include <math.h> // type of elements used #define inttype uint32_t // type of indices in hash table #define indextype uint64_t enum BucketEntryStatus { EMPTY, TAKEN, FOUND }; enum PropertyStatus { NONE, DEADLOCK, SAFETY, LIVENESS }; #define MIN(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a < _b ? _a : _b; }) #define MAX(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a > _b ? _a : _b; }) // Nr of tiles processed in single kernel launch //#define TILEITERS 10 static const int WARPSIZE = 32; static const int HALFWARPSIZE = 16; static const int INTSIZE = 32; static const int BUFFERSIZE = 256; // GPU constants __constant__ inttype d_nrbuckets; __constant__ inttype d_shared_q_size; __constant__ inttype d_nr_procs; __constant__ inttype d_max_buf_ints; __constant__ inttype d_sv_nints; __constant__ inttype d_bits_act; __constant__ inttype d_nbits_offset; __constant__ inttype d_kernel_iters; __constant__ inttype d_nbits_syncbits_offset; __constant__ PropertyStatus d_property; __constant__ inttype d_apply_por; __constant__ inttype d_check_cycle_proviso; // GPU shared memory array extern __shared__ volatile inttype shared[]; // thread ids #define WARP_ID (threadIdx.x / WARPSIZE) #define GLOBAL_WARP_ID (((blockDim.x / WARPSIZE)*blockIdx.x)+WARP_ID) #define NR_WARPS ((blockDim.x / WARPSIZE)*gridDim.x) #define LANE (threadIdx.x % WARPSIZE) #define HALFLANE (threadIdx.x % HALFWARPSIZE) //#define ENTRY_ID (LANE % d_sv_nints) #define ENTRY_ID (HALFLANE % d_sv_nints) #define GROUP_ID (LANE % d_nr_procs) #define GROUP_GID (WARP_ID * GROUPS_PER_WARP + LANE / d_nr_procs) #define NR_GROUPS ((blockDim.x / WARPSIZE) * GROUPS_PER_WARP) #define GROUPS_PER_WARP (WARPSIZE / d_nr_procs) // Group id to lane and lane to group id macros #define GTL(i) (LANE - GROUP_ID + (i)) #define LTG(i) ((i) - (LANE - GROUP_ID)) //#define NREL_IN_BUCKET ((WARPSIZE / d_sv_nints)) #define NREL_IN_BUCKET ((HALFWARPSIZE / d_sv_nints)*2) #define NREL_IN_BUCKET_HOST ((HALFWARPSIZE / sv_nints)*2) // constant for cuckoo hashing (Alcantara et al) static const inttype P = 979946131; // Retry constant to determine number of retries for element insertion #define RETRYFREQ 7 #define NR_HASH_FUNCTIONS 8 // Number of retries in local cache #define CACHERETRYFREQ 20 // Maximum size of state vectors (in nr. 
of 32-bit integers) #define MAX_SIZE 9 // Empty state vectors static const inttype EMPTYVECT32 = 0x7FFFFFFF; // Constant to indicate that no more work is required # define EXPLORATION_DONE 0x7FFFFFFF // offset in shared memory from which loaded data can be read static const int SH_OFFSET = 5; //static const int KERNEL_ITERS = 10; //static const int NR_OF_BLOCKS = 3120; //static const int BLOCK_SIZE = 512; static const int KERNEL_ITERS = 1; static const int NR_OF_BLOCKS = 1; static const int BLOCK_SIZE = 32; const size_t Mb = 1<<20; // test macros #define PRINTTHREADID() {printf("Hello thread %d\n", (blockIdx.x*blockDim.x)+threadIdx.x);} #define PRINTTHREAD(j, i) {printf("%d: Seen by thread %d: %d\n", (j), (blockIdx.x*blockDim.x)+threadIdx.x, (i));} // Offsets calculations for shared memory arrays #define HASHCONSTANTSLEN (2*NR_HASH_FUNCTIONS) #define VECTORPOSLEN (d_nr_procs+1) #define LTSSTATESIZELEN (d_nr_procs) #define OPENTILELEN (d_sv_nints*NR_GROUPS) #define LASTSEARCHLEN (blockDim.x/WARPSIZE) #define TGTSTATELEN (blockDim.x*d_sv_nints) #define THREADBUFFERLEN (NR_GROUPS*(THREADBUFFERSHARED+(d_nr_procs*d_max_buf_ints))) #define HASHCONSTANTSOFFSET (SH_OFFSET) #define VECTORPOSOFFSET (HASHCONSTANTSOFFSET+HASHCONSTANTSLEN) #define LTSSTATESIZEOFFSET (VECTORPOSOFFSET+VECTORPOSLEN) #define OPENTILEOFFSET (LTSSTATESIZEOFFSET+LTSSTATESIZELEN) #define LASTSEARCHOFFSET (OPENTILEOFFSET+OPENTILELEN) #define TGTSTATEOFFSET (LASTSEARCHOFFSET+LASTSEARCHLEN) #define THREADBUFFEROFFSET (TGTSTATEOFFSET+TGTSTATELEN) #define CACHEOFFSET (THREADBUFFEROFFSET+THREADBUFFERLEN) // One int for sync action counter // One int for POR counter #define THREADBUFFERSHARED 2 // parameter is thread id #define THREADBUFFERGROUPSTART(i) (THREADBUFFEROFFSET+ (((i) / WARPSIZE)*GROUPS_PER_WARP+(((i) % WARPSIZE) / d_nr_procs)) * (THREADBUFFERSHARED+(d_nr_procs*d_max_buf_ints))) // parameter is group id #define THREADBUFFERGROUPPOS(i, j) shared[tbgs+THREADBUFFERSHARED+((i)*d_max_buf_ints)+(j)] #define THREADGROUPCOUNTER shared[tbgs] #define THREADGROUPPOR shared[tbgs + 1] #define THREADINGROUP (LANE < (GROUPS_PER_WARP)*d_nr_procs) #define STATESIZE(i) (shared[LTSSTATESIZEOFFSET+(i)]) #define VECTORSTATEPOS(i) (shared[VECTORPOSOFFSET+(i)]) #define NR_OF_STATES_IN_TRANSENTRY(i) ((31 - d_bits_act) / shared[LTSSTATESIZEOFFSET+(i)]) // SM local progress flags #define ITERATIONS (shared[0]) #define CONTINUE (shared[1]) #define OPENTILECOUNT (shared[2]) #define WORKSCANRESULT (shared[3]) #define SCAN (shared[4]) // BIT MANIPULATION MACROS #define SETBIT(i, x) {(x) = ((1<<(i)) | (x));} #define GETBIT(i, x) (((x) >> (i)) & 1) #define SETBITS(i, j, x) {(x) = (x) | (((1<<(j))-1)^((1<<(i))-1));} #define GETBITS(x, y, start, len) {(x) = ((y) >> (start)) & ((1 << (len)) - 1);} #define GETPROCTRANSACT(a, t) GETBITS(a, t, 1, d_bits_act) #define GETPROCTRANSSYNC(a, t) {(a) = ((t) & 1);} #define GETPROCTRANSSTATE(a, t, i, j) GETBITS(a, t, 1+d_bits_act+(i)*STATESIZE(j), STATESIZE(j)) #define GETTRANSOFFSET(a, t, i) GETBITS(a, t, (i)*d_nbits_offset, d_nbits_offset) #define GETSYNCOFFSET(a, t, i) GETBITS(a, t, (i)*d_nbits_syncbits_offset, d_nbits_syncbits_offset) #define GETSTATEVECTORSTATE(b, t, i) { asm("{\n\t" \ " .reg .u64 t1;\n\t" \ " mov.b64 t1,{%1,%2};\n\t" \ " bfe.u64 t1, t1, %3, %4;\n\t" \ " cvt.u32.u64 %0,t1;\n\t" \ "}" : "=r"(b) : "r"((t)[VECTORSTATEPOS(i)/INTSIZE]), "r"(VECTORSTATEPOS(i)/INTSIZE == (VECTORSTATEPOS((i)+1)-1)/INTSIZE ? 
0 : (t)[VECTORSTATEPOS(i)/INTSIZE+1]), \ "r"(VECTORSTATEPOS(i)%INTSIZE), "r"(VECTORSTATEPOS(i+1)-VECTORSTATEPOS(i))); \ } #define SETSTATEVECTORSTATE(t, i, x) { asm("bfi.b32 %0, %1, %0, %2, %3;" \ : "+r"((t)[VECTORSTATEPOS(i)/INTSIZE]) : \ "r"(x), "r"(VECTORSTATEPOS(i)%INTSIZE), "r"(VECTORSTATEPOS((i)+1)-VECTORSTATEPOS(i))); \ if (VECTORSTATEPOS(i)/INTSIZE != (VECTORSTATEPOS((i)+1)-1)/INTSIZE) { \ asm("bfi.b32 %0, %1, %0, %2, %3;" \ : "+r"((t)[VECTORSTATEPOS(i+1)/INTSIZE]) : \ "r"((x)>>(INTSIZE - (VECTORSTATEPOS(i) % INTSIZE))), "r"(0), "r"(VECTORSTATEPOS((i)+1) % INTSIZE)); \ } \ } // NEEDS FIX: USE BIT 32 OF FIRST INTEGER TO INDICATE STATE OR NOT (1 or 0), IN CASE MULTIPLE INTEGERS ARE USED FOR STATE VECTOR!!! //#define ISSTATE(t) ((t)[(d_sv_nints-1)] != EMPTYVECT32) #define ISSTATE(t) ((t)[0] != EMPTYVECT32) #define SETNEWSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] | 0x80000000;} #define SETOLDSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] & 0x7FFFFFFF;} #define ISNEWSTATE(t) ((t)[(d_sv_nints-1)] >> 31) #define ISNEWSTATE_HOST(t) ((t)[(sv_nints-1)] >> 31) #define ISNEWINT(t) ((t) >> 31) #define OLDINT(t) ((t) & 0x7FFFFFFF) #define NEWINT(t) ((t) | 0x80000000) #define SETPORSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] | 0x40000000;} #define SETOTHERSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] & 0xBFFFFFFF;} #define ISPORSTATE(t) (ISPORINT((t)[(d_sv_nints-1))) #define ISPORSTATE_HOST(t) (ISPORINT((t)[(sv_nints-1))) #define ISPORINT(t) (((t) & 0x40000000) >> 30) #define OTHERINT(t) ((t) & 0xBFFFFFFF) #define PORINT(t) ((t) | 0x40000000) #define STATE_FLAGS_MASK (d_apply_por ? 0x3FFFFFFF : 0x7FFFFFFF) #define STRIPSTATE(t) {(t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] & STATE_FLAGS_MASK;} #define STRIPPEDSTATE(t, i) ((i == d_sv_nints-1) ? ((t)[i] & STATE_FLAGS_MASK) : (t)[i]) #define STRIPPEDENTRY(t, i) ((i == d_sv_nints-1) ? ((t) & STATE_FLAGS_MASK) : (t)) #define STRIPPEDENTRY_HOST(t, i) ((i == sv_nints-1) ? ((t) & (apply_por ? 0x3FFFFFFF : 0x7FFFFFFF)) : (t)) #define NEWSTATEPART(t, i) (((i) == d_sv_nints-1) ? 
((t)[d_sv_nints-1] | 0x80000000) : (t)[(i)]) #define COMPAREENTRIES(t1, t2) (((t1) & STATE_FLAGS_MASK) == ((t2) & STATE_FLAGS_MASK)) #define GETSYNCRULE(a, t, i) GETBITS(a, t, (i)*d_nr_procs, d_nr_procs) // HASH TABLE MACROS // Return 0 if not found, bit 2 is flag for new state, bit 3 is a flag for POR state, 8 if cache is full __device__ inttype STOREINCACHE(volatile inttype* t, inttype* cache, inttype* address) { inttype bi, bj, bk, bl, bitmask; indextype hashtmp; STRIPSTATE(t); hashtmp = 0; for (bi = 0; bi < d_sv_nints; bi++) { hashtmp += t[bi]; hashtmp <<= 5; } bitmask = d_sv_nints*((inttype) (hashtmp % ((d_shared_q_size - CACHEOFFSET) / d_sv_nints))); SETNEWSTATE(t); bl = 0; while (bl < CACHERETRYFREQ) { bi = atomicCAS((inttype *) &cache[bitmask+(d_sv_nints-1)], EMPTYVECT32, t[d_sv_nints-1]); if (bi == EMPTYVECT32) { for (bj = 0; bj < d_sv_nints-1; bj++) { cache[bitmask+bj] = t[bj]; } *address = bitmask; return 0; } if (COMPAREENTRIES(bi, t[d_sv_nints-1])) { if (d_sv_nints == 1) { *address = bitmask; return 1 + (ISNEWINT(bi) << 1) + (ISPORINT(bi) << 2); } else { for (bj = 0; bj < d_sv_nints-1; bj++) { if (cache[bitmask+bj] != (t)[bj]) { break; } } if (bj == d_sv_nints-1) { *address = bitmask; return 1 + (ISNEWINT(bi) << 1) + (ISPORINT(bi) << 2); } } } if (!ISNEWINT(bi)) { bj = atomicCAS((inttype *) &cache[bitmask+(d_sv_nints-1)], bi, t[d_sv_nints-1]); if (bi == bj) { for (bk = 0; bk < d_sv_nints-1; bk++) { cache[bitmask+bk] = t[bk]; } *address = bitmask; return 0; } } bl++; bitmask += d_sv_nints; if ((bitmask+(d_sv_nints-1)) >= (d_shared_q_size - CACHEOFFSET)) { bitmask = 0; } } return 8; } // Mark the state in the cache according to markNew // This function is used while applying POR to decide whether the cycle proviso // is satisfied. __device__ void MARKINCACHE(volatile inttype* t, inttype* cache, int markNew) { inttype bi, bj, bl, bitmask; indextype hashtmp; STRIPSTATE(t); hashtmp = 0; for (bi = 0; bi < d_sv_nints; bi++) { hashtmp += t[bi]; hashtmp <<= 5; } bitmask = d_sv_nints*((inttype) (hashtmp % ((d_shared_q_size - CACHEOFFSET) / d_sv_nints))); SETNEWSTATE(t); bl = 0; while (bl < CACHERETRYFREQ) { bi = cache[bitmask+(d_sv_nints-1)]; if (COMPAREENTRIES(bi, t[d_sv_nints-1])) { for (bj = 0; bj < d_sv_nints-1; bj++) { if (cache[bitmask+bj] != (t)[bj]) { break; } } if (bj == d_sv_nints-1) { if(markNew) { cache[bitmask+(d_sv_nints-1)] = NEWINT(OTHERINT(cache[bitmask+(d_sv_nints-1)] & STATE_FLAGS_MASK)); } else if(ISPORINT(bi) && ISNEWINT(bi)){ atomicCAS((inttype*) &cache[bitmask+(d_sv_nints-1)], bi, OLDINT(bi)); } return; } } bl++; bitmask += d_sv_nints; if ((bitmask+(d_sv_nints-1)) >= (d_shared_q_size - CACHEOFFSET)) { bitmask = 0; } } } // hash functions use bj variable #define FIRSTHASH(a, t) { hashtmp = 0; \ for (bj = 0; bj < d_sv_nints; bj++) { \ hashtmp += STRIPPEDSTATE(t,bj); \ hashtmp <<= 5; \ } \ hashtmp = (indextype) (d_h[0]*hashtmp+d_h[1]); \ (a) = WARPSIZE*((inttype)(hashtmp % P) % d_nrbuckets); \ } #define FIRSTHASHHOST(a) { indextype hashtmp = 0; \ hashtmp = (indextype) h[1]; \ (a) = WARPSIZE*((inttype) ((hashtmp % P) % q_size/WARPSIZE)); \ } #define HASHALL(a, i, t) { hashtmp = 0; \ for (bj = 0; bj < d_sv_nints; bj++) { \ hashtmp += STRIPPEDSTATE(t,bj); \ hashtmp <<= 5; \ } \ hashtmp = (indextype) (shared[HASHCONSTANTSOFFSET+(2*(i))]*(hashtmp)+shared[HASHCONSTANTSOFFSET+(2*(i))+1]); \ (a) = WARPSIZE*((inttype)(hashtmp % P) % d_nrbuckets); \ } #define HASHFUNCTION(a, i, t) ((HASHALL((a), (i), (t)))) #define COMPAREVECTORS(a, t1, t2) { (a) = 1; \ for (bk = 0; bk < 
d_sv_nints-1; bk++) { \ if ((t1)[bk] != (t2)[bk]) { \ (a) = 0; break; \ } \ } \ if ((a)) { \ if (STRIPPEDSTATE((t1),bk) != STRIPPEDSTATE((t2),bk)) { \ (a) = 0; \ } \ } \ } // check if bucket element associated with lane is a valid position to store data #define LANEPOINTSTOVALIDBUCKETPOS (HALFLANE < ((HALFWARPSIZE / d_sv_nints)*d_sv_nints)) __device__ inttype LANE_POINTS_TO_EL(inttype i) { if (i < HALFWARPSIZE / d_sv_nints) { return (LANE >= i*d_sv_nints && LANE < (i+1)*d_sv_nints); } else { return (LANE >= HALFWARPSIZE+(i-(HALFWARPSIZE / d_sv_nints))*d_sv_nints && LANE < HALFWARPSIZE+(i-(HALFWARPSIZE / d_sv_nints)+1)*d_sv_nints); } } // start position of element i in bucket #define STARTPOS_OF_EL_IN_BUCKET(i) ((i < (HALFWARPSIZE / d_sv_nints)) ? (i*d_sv_nints) : (HALFWARPSIZE + (i-(HALFWARPSIZE/d_sv_nints))*d_sv_nints)) #define STARTPOS_OF_EL_IN_BUCKET_HOST(i) ((i < (HALFWARPSIZE / sv_nints)) ? (i*sv_nints) : (HALFWARPSIZE + (i-(HALFWARPSIZE/sv_nints))*sv_nints)) // find or put element, warp version. t is element stored in block cache __device__ inttype FINDORPUT_WARP(inttype* t, inttype* d_q, volatile inttype* d_newstate_flags, inttype claim_work) { inttype bi, bj, bk, bl, bitmask; indextype hashtmp; inttype hash; BucketEntryStatus threadstatus; // prepare bitmask once to reason about results of threads in the same (state vector) group bitmask = 0; if (LANEPOINTSTOVALIDBUCKETPOS) { SETBITS(LANE-ENTRY_ID, LANE-ENTRY_ID+d_sv_nints, bitmask); } for (bi = 0; bi < NR_HASH_FUNCTIONS; bi++) { HASHFUNCTION(hash, bi, t); bl = d_q[hash+LANE]; bk = __ballot(STRIPPEDENTRY(bl, ENTRY_ID) == STRIPPEDSTATE(t, ENTRY_ID)); // threadstatus is used to determine whether full state vector has been found threadstatus = EMPTY; if (LANEPOINTSTOVALIDBUCKETPOS) { if ((bk & bitmask) == bitmask) { threadstatus = FOUND; } } if (__ballot(threadstatus == FOUND) != 0) { // state vector has been found in bucket. mark local copy as old. if (LANE == 0) { SETOLDSTATE(t); } return 1; } // try to find empty position to insert new state vector threadstatus = (bl == EMPTYVECT32 && LANEPOINTSTOVALIDBUCKETPOS) ? EMPTY : TAKEN; // let bk hold the smallest index of an available empty position bk = __ffs(__ballot(threadstatus == EMPTY)); while (bk != 0) { // write the state vector bk--; if (LANE >= bk && LANE < bk+d_sv_nints) { bl = atomicCAS(&(d_q[hash+LANE]), EMPTYVECT32, t[ENTRY_ID]); if (bl == EMPTYVECT32) { // success if (ENTRY_ID == d_sv_nints-1) { SETOLDSTATE(t); } // try to claim the state vector for future work bl = OPENTILELEN; if (ENTRY_ID == d_sv_nints-1) { // try to increment the OPENTILECOUNT counter if (claim_work && (bl = atomicAdd((inttype *) &OPENTILECOUNT, d_sv_nints)) < OPENTILELEN) { d_q[hash+LANE] = t[d_sv_nints-1]; } else { // There is work available for some block __threadfence(); d_newstate_flags[(hash / blockDim.x) % gridDim.x] = 1; } } // all active threads read the OPENTILECOUNT value of the last thread, and possibly store their part of the vector in the shared memory bl = __shfl(bl, LANE-ENTRY_ID+d_sv_nints-1); if (bl < OPENTILELEN) { // write part of vector to shared memory shared[OPENTILEOFFSET+bl+ENTRY_ID] = NEWSTATEPART(t, ENTRY_ID); } // write was successful. propagate this to the whole warp by setting threadstatus to FOUND threadstatus = FOUND; } else { // write was not successful. 
check if the state vector now in place equals the one we are trying to insert bk = __ballot(STRIPPEDENTRY(bl, ENTRY_ID) == STRIPPEDSTATE(t, ENTRY_ID)); if ((bk & bitmask) == bitmask) { // state vector has been found in bucket. mark local copy as old. if (LANE == bk) { SETOLDSTATE(t); } // propagate this result to the whole warp threadstatus = FOUND; } else { // state vector is different, and position in bucket is taken threadstatus = TAKEN; } } } // check if the state vector was either encountered or inserted if (__ballot(threadstatus == FOUND) != 0) { return 1; } // recompute bk bk = __ffs(__ballot(threadstatus == EMPTY)); } } return 0; } // find element, warp version. t is element stored in block cache // return 0 if not found or found and new, 1 if found and old __device__ inttype FIND_WARP(inttype* t, inttype* d_q) { inttype bi, bj, bk, bl, bitmask; indextype hashtmp; BucketEntryStatus threadstatus; // prepare bitmask once to reason about results of threads in the same (state vector) group bitmask = 0; if (LANEPOINTSTOVALIDBUCKETPOS) { SETBITS(LANE-ENTRY_ID, LANE-ENTRY_ID+d_sv_nints, bitmask); } for (bi = 0; bi < NR_HASH_FUNCTIONS; bi++) { HASHFUNCTION(hashtmp, bi, t); bl = d_q[hashtmp+LANE]; bk = __ballot(STRIPPEDENTRY(bl, ENTRY_ID) == STRIPPEDSTATE(t, ENTRY_ID)); // threadstatus is used to determine whether full state vector has been found threadstatus = EMPTY; if (LANEPOINTSTOVALIDBUCKETPOS) { if ((bk & bitmask) == bitmask) { threadstatus = FOUND; } } if (__ballot(threadstatus == FOUND) != 0) { // state vector has been found in bucket. mark local copy as old. if (threadstatus == FOUND & ISNEWINT(bl) == 0 & ENTRY_ID == d_sv_nints - 1) { SETOLDSTATE(t); } SETPORSTATE(t); return __ballot(threadstatus == FOUND & ISNEWINT(bl) == 0 & ENTRY_ID == d_sv_nints - 1); } // try to find empty position threadstatus = (bl == EMPTYVECT32 && LANEPOINTSTOVALIDBUCKETPOS) ? EMPTY : TAKEN; if(__any(threadstatus == EMPTY)) { // There is an empty slot in this bucket and the state vector was not found // State will also not be found after rehashing, so we return 0 SETPORSTATE(t); return 0; } } SETPORSTATE(t); return 0; } // macro to print state vector #define PRINTVECTOR(s) { printf ("("); \ for (bk = 0; bk < d_nr_procs; bk++) { \ GETSTATEVECTORSTATE(bj, (s), bk) \ printf ("%d", bj); \ if (bk < (d_nr_procs-1)) { \ printf (","); \ } \ } \ printf (")\n"); \ } int vmem = 0; // GPU textures texture<inttype, 1, hipReadModeElementType> tex_proc_offsets_start; texture<inttype, 1, hipReadModeElementType> tex_proc_offsets; texture<inttype, 1, hipReadModeElementType> tex_proc_trans_start; texture<inttype, 1, hipReadModeElementType> tex_proc_trans; texture<inttype, 1, hipReadModeElementType> tex_syncbits_offsets; texture<inttype, 1, hipReadModeElementType> tex_syncbits; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. 
*/ #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } //wrapper around hipMalloc to count allocated memory and check for error while allocating int cudaMallocCount ( void ** ptr,int size) { hipError_t err = hipSuccess; vmem += size; err = hipMalloc(ptr,size); if (err) { printf("Error %s at line %d in file %s\n", hipGetErrorString(err), __LINE__, __FILE__); exit(1); } fprintf (stdout, "allocated %d\n", size); return size; } //test function to print a given state vector void print_statevector(FILE* stream, inttype *state, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints, inttype apply_por) { inttype i, s, bitmask; for (i = 0; i < nr_procs; i++) { bitmask = 0; if (firstbit_statevector[i]/INTSIZE == firstbit_statevector[i+1]/INTSIZE) { bitmask = (((1<<(firstbit_statevector[i+1] % INTSIZE))-1)^((1<<(firstbit_statevector[i] % INTSIZE))-1)); s = (state[firstbit_statevector[i]/INTSIZE] & bitmask) >> (firstbit_statevector[i] % INTSIZE); } else { bitmask = 1 << (firstbit_statevector[i+1] % INTSIZE); s = (state[firstbit_statevector[i]/INTSIZE] >> (firstbit_statevector[i] % INTSIZE) | (state[firstbit_statevector[i+1]/INTSIZE] & bitmask) << (INTSIZE - (firstbit_statevector[i] % INTSIZE))); \ } fprintf (stream, "%d", s); if (i < (nr_procs-1)) { fprintf (stream, ","); } } fprintf (stream, " "); for (i = 0; i < sv_nints; i++) { fprintf (stream, "%d ", STRIPPEDENTRY_HOST(state[i], i)); } fprintf (stream, "\n"); } //test function to print the contents of the device queue void print_queue(inttype *d_q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints, inttype apply_por) { inttype *q_test = (inttype*) malloc(sizeof(inttype)*q_size); hipMemcpy(q_test, d_q, q_size*sizeof(inttype), hipMemcpyDeviceToHost); inttype nw; int count = 0; int newcount = 0; for (inttype i = 0; i < (q_size/WARPSIZE); i++) { for (inttype j = 0; j < NREL_IN_BUCKET_HOST; j++) { if (q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)+(sv_nints-1)] != EMPTYVECT32) { count++; nw = ISNEWSTATE_HOST(&q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]); if (nw) { newcount++; fprintf (stdout, "new: "); } print_statevector(stdout, &(q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]), firstbit_statevector, nr_procs, sv_nints, apply_por); } } } fprintf (stdout, "nr. of states in hash table: %d (%d unexplored states)\n", count, newcount); } //test function to print the contents of the device queue void print_local_queue(FILE* stream, inttype *q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints, inttype apply_por) { int count = 0, newcount = 0; inttype nw; for (inttype i = 0; i < (q_size/WARPSIZE); i++) { for (inttype j = 0; j < NREL_IN_BUCKET_HOST; j++) { if (q[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)+(sv_nints-1)] != EMPTYVECT32) { count++; nw = ISNEWSTATE_HOST(&q[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]); if (nw) { newcount++; fprintf (stream, "new: "); } print_statevector(stream, &(q[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]), firstbit_statevector, nr_procs, sv_nints, apply_por); } } } fprintf (stream, "nr. 
of states in hash table: %d (%d unexplored states)\n", count, newcount); } //test function to count the contents of the device queue void count_queue(inttype *d_q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints) { inttype *q_test = (inttype*) malloc(sizeof(inttype)*q_size); hipMemcpy(q_test, d_q, q_size*sizeof(inttype), hipMemcpyDeviceToHost); int count = 0; for (inttype i = 0; i < (q_size/WARPSIZE); i++) { for (inttype j = 0; j < NREL_IN_BUCKET_HOST; j++) { if (q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)+(sv_nints-1)] != EMPTYVECT32) { count++; } } } fprintf (stdout, "nr. of states in hash table: %d\n", count); } //test function to count the contents of the host queue void count_local_queue(inttype *q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints) { int count = 0, newcount = 0; inttype nw; inttype nrbuckets = q_size / WARPSIZE; inttype nrels = NREL_IN_BUCKET_HOST; for (inttype i = 0; i < nrbuckets; i++) { for (inttype j = 0; j < nrels; j++) { inttype elpos = STARTPOS_OF_EL_IN_BUCKET_HOST(j); inttype abselpos = (i*WARPSIZE)+elpos+sv_nints-1; inttype q_abselpos = q[abselpos]; if (q_abselpos != EMPTYVECT32) { count++; nw = ISNEWSTATE_HOST(&q[(i*WARPSIZE)+elpos]); if (nw) { newcount++; } } } } fprintf (stdout, "nr. of states in hash table: %d (%d unexplored states)\n", count, newcount); } /** * CUDA kernel function to initialise the queue */ __global__ void init_queue(inttype *d_q, inttype n_elem) { inttype nthreads = blockDim.x*gridDim.x; inttype i = (blockIdx.x *blockDim.x) + threadIdx.x; for(; i < n_elem; i += nthreads) { d_q[i] = (inttype) EMPTYVECT32; } } /** * CUDA kernel to store initial state in hash table */ __global__ void store_initial(inttype *d_q, inttype *d_h, inttype *d_newstate_flags, inttype blockdim, inttype griddim) { inttype bj, hash; indextype hashtmp; inttype state[MAX_SIZE]; for (bj = 0; bj < d_sv_nints; bj++) { state[bj] = 0; } SETNEWSTATE(state); FIRSTHASH(hash, state); for (bj = 0; bj < d_sv_nints; bj++) { d_q[hash+bj] = state[bj]; } d_newstate_flags[(hash / blockdim) % griddim] = 1; } /** * Kernel that counts the amount of states in global memory */ __global__ void count_states(inttype *d_q, inttype *result) { if(threadIdx.x == 0) { shared[0] = 0; } __syncthreads(); int localResult = 0; for(int i = GLOBAL_WARP_ID; i < d_nrbuckets; i += NR_WARPS) { int tmp = d_q[i*WARPSIZE+LANE]; if (ENTRY_ID == (d_sv_nints-1) && tmp != EMPTYVECT32) { localResult++; } } atomicAdd((unsigned int*)shared, localResult); __syncthreads(); if(threadIdx.x == 0) { atomicAdd(result, shared[0]); } } // When the cache overflows, use the whole warp to store states to global memory __device__ void store_cache_overflow_warp(inttype *d_q, volatile inttype *d_newstate_flags, int has_overflow) { while(int c = __ballot(has_overflow)) { int active_lane = __ffs(c) - 1; int bj = FINDORPUT_WARP((inttype*) &shared[TGTSTATEOFFSET + (threadIdx.x-LANE+active_lane)*d_sv_nints], d_q, d_newstate_flags, 0); if(LANE == active_lane) { has_overflow = 0; if(bj == 0) { CONTINUE = 2; } } } } // Copy all states from the cache to global memory __device__ void copy_cache_to_global(inttype *d_q, inttype* cache, volatile inttype *d_newstate_flags) { int k = (d_shared_q_size-CACHEOFFSET)/d_sv_nints; for (int i = WARP_ID; i * WARPSIZE < k; i += (blockDim.x / WARPSIZE)) { int have_new_state = i * WARPSIZE + LANE < k && ISNEWSTATE(&cache[(i*WARPSIZE+LANE)*d_sv_nints]); while (int c = __ballot(have_new_state)) { int active_lane = __ffs(c) - 1; 
if(FINDORPUT_WARP((inttype*) &cache[(i*WARPSIZE+active_lane)*d_sv_nints], d_q, d_newstate_flags, 1) == 0) { CONTINUE = 2; } if (LANE == active_lane) { have_new_state = 0; } } } } /** * CUDA kernel function for BFS iteration state gathering * Order of data in the shared queue: * (0. index of process LTS states sizes) * (1. index of sync rules offsets) * (2. index of sync rules) * (1. index of open queue tile) * 0. the 'iterations' flag to count the number of iterations so far (nr of tiles processed by SM) * 1. the 'continue' flag for thread work * (4. index of threads buffer) * (5. index of hash table) * 2. constants for d_q hash functions (2 per function, in total 8 by default) * 3. state vector offsets (nr_procs+1 elements) * 4. sizes of states in process LTS states (nr_procs elements) * (9. sync rules + offsets (nr_syncbits_offsets + nr_syncbits elements)) * 5. tile of open queue to be processed by block (sv_nints*(blockDim.x / nr_procs) elements) * 6. buffer for threads ((blockDim.x*max_buf_ints)+(blockDim.x/nr_procs) elements) * 7. hash table */ __global__ void __launch_bounds__(512, 2) gather(inttype *d_q, const inttype *d_h, const inttype *d_bits_state, const inttype *d_firstbit_statevector, inttype *d_contBFS, inttype *d_property_violation, volatile inttype *d_newstate_flags, inttype *d_worktiles, const inttype scan) { inttype i, k, l, index, offset1, offset2, tmp, cont, act, sync_offset1, sync_offset2; volatile inttype* src_state = &shared[OPENTILEOFFSET+d_sv_nints*GROUP_GID]; volatile inttype* tgt_state = &shared[TGTSTATEOFFSET+threadIdx.x*d_sv_nints]; inttype* cache = (inttype*) &shared[CACHEOFFSET]; inttype bitmask, bi; int pos; int tbgs = THREADBUFFERGROUPSTART(threadIdx.x); // TODO // is at least one outgoing transition enabled for a given state (needed to detect deadlocks) inttype outtrans_enabled; // Reset the shared variables if (threadIdx.x < SH_OFFSET) { shared[threadIdx.x] = 0; } // Load the hash constants into shared memory for (int j = threadIdx.x; j < HASHCONSTANTSLEN; j += blockDim.x) { shared[j+HASHCONSTANTSOFFSET] = d_h[j]; } // Load the state sizes and offsets into shared memory for (int j = threadIdx.x; j < VECTORPOSLEN; j += blockDim.x) { VECTORSTATEPOS(j) = d_firstbit_statevector[j]; } for (int j = threadIdx.x; j < LTSSTATESIZELEN; j += blockDim.x) { STATESIZE(j) = d_bits_state[j]; } // Clean the cache for (int j = threadIdx.x; j < (d_shared_q_size - (cache-shared)); j += blockDim.x) { cache[j] = EMPTYVECT32; } if(scan) { // Copy the work tile from global mem if (threadIdx.x < OPENTILELEN + LASTSEARCHLEN) { shared[OPENTILEOFFSET+threadIdx.x] = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x]; } if(threadIdx.x == 0) { OPENTILECOUNT = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN + LASTSEARCHLEN]; } } else if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { // On first run: initialize the work tile to empty shared[OPENTILEOFFSET+threadIdx.x] = threadIdx.x < OPENTILELEN ? 
EMPTYVECT32 : 0; } __syncthreads(); while (ITERATIONS < d_kernel_iters) { if (threadIdx.x == 0 && OPENTILECOUNT < OPENTILELEN && d_newstate_flags[blockIdx.x]) { // Indicate that we are scanning d_newstate_flags[blockIdx.x] = 2; SCAN = 1; } __syncthreads(); // Scan the open set for work; we use the OPENTILECOUNT flag at this stage to count retrieved elements if (SCAN) { inttype last_search_location = shared[LASTSEARCHOFFSET + WARP_ID]; // This block should be able to find a new state int found_new_state = 0; for (i = GLOBAL_WARP_ID; i < d_nrbuckets && OPENTILECOUNT < OPENTILELEN; i += NR_WARPS) { int loc = i + last_search_location; if(loc >= d_nrbuckets) { last_search_location = -i + GLOBAL_WARP_ID; loc = i + last_search_location; } tmp = d_q[loc*WARPSIZE+LANE]; l = EMPTYVECT32; if (ENTRY_ID == (d_sv_nints-1)) { if (ISNEWINT(tmp)) { found_new_state = 1; // try to increment the OPENTILECOUNT counter, if successful, store the state l = atomicAdd((uint32_t *) &OPENTILECOUNT, d_sv_nints); if (l < OPENTILELEN) { d_q[loc*WARPSIZE+LANE] = OLDINT(tmp); } } } // all threads read the OPENTILECOUNT value of the 'tail' thread, and possibly store their part of the vector in the shared memory if (LANEPOINTSTOVALIDBUCKETPOS) { l = __shfl(l, LANE-ENTRY_ID+d_sv_nints-1); if (l < OPENTILELEN) { // write part of vector to shared memory shared[OPENTILEOFFSET+l+ENTRY_ID] = tmp; } } } if(i < d_nrbuckets) { last_search_location = i - GLOBAL_WARP_ID; } else { last_search_location = 0; } if(LANE == 0) { // Store the last search location, so we can continue from that point later on shared[LASTSEARCHOFFSET + WARP_ID] = last_search_location; } if(found_new_state || i < d_nrbuckets) { WORKSCANRESULT = 1; } } __syncthreads(); // if work has been retrieved, indicate this if (threadIdx.x == 0) { if (OPENTILECOUNT > 0) { (*d_contBFS) = 1; } if(SCAN && WORKSCANRESULT == 0 && d_newstate_flags[blockIdx.x] == 2) { // Scanning has completed and no new states were found by this block, // save this information to prevent unnecessary scanning later on d_newstate_flags[blockIdx.x] = 0; } else { WORKSCANRESULT = 0; } } // is the thread part of an 'active' group? offset1 = 0; offset2 = 0; // Reset the whole thread buffer (shared + private) int start = THREADBUFFEROFFSET; int end = THREADBUFFEROFFSET + THREADBUFFERLEN; for(int j = start + threadIdx.x; j < end; j+=blockDim.x) { shared[j] = 0; } if (THREADINGROUP) { // Is there work? 
if (ISSTATE(src_state)) { // Gather the required transition information for all states in the tile i = tex1Dfetch(tex_proc_offsets_start, GROUP_ID); // Determine process state GETSTATEVECTORSTATE(cont, src_state, GROUP_ID); // Offset position index = cont/(INTSIZE/d_nbits_offset); pos = cont - (index*(INTSIZE/d_nbits_offset)); tmp = tex1Dfetch(tex_proc_offsets, i+index); GETTRANSOFFSET(offset1, tmp, pos); if (pos == (INTSIZE/d_nbits_offset)-1) { tmp = tex1Dfetch(tex_proc_offsets, i+index+1); GETTRANSOFFSET(offset2, tmp, 0); } else { GETTRANSOFFSET(offset2, tmp, pos+1); } } } // variable cont is used to indicate whether the buffer content of this thread still needs processing cont = 0; outtrans_enabled = 0; // First, generate successors following from local actions while (1) { i = 1; if(offset1 < offset2) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSSYNC(i, tmp); } if (__any(i == 0)) { if(i == 0) { // no deadlock outtrans_enabled = 1; // construct state for (int j = 0; j < d_sv_nints; j++) { tgt_state[j] = src_state[j]; } offset1++; } // loop over this transentry for (int j = 0; __any(i == 0 && j < NR_OF_STATES_IN_TRANSENTRY(GROUP_ID)); j++) { if(i == 0) { GETPROCTRANSSTATE(pos, tmp, j, GROUP_ID); if (pos > 0) { SETSTATEVECTORSTATE(tgt_state, GROUP_ID, pos-1); // check for violation of safety property, if required if (d_property == SAFETY) { if (GROUP_ID == d_nr_procs-1) { // pos contains state id + 1 // error state is state 1 if (pos == 2) { // error state found (*d_property_violation) = 1; } } } // store tgt_state in cache // if k == 8, cache is full, immediately store in global hash table k = STOREINCACHE(tgt_state, cache, &bi); } else { i = 1; } } store_cache_overflow_warp(d_q, d_newstate_flags, i == 0 && k == 8); } } else { break; } } // Now there are only synchronizing actions left act = 1 << d_bits_act; // While the hash table is not full and there are transitions left, // explore those transitions while (CONTINUE != 2 && __any(offset1 < offset2 || cont)) { if (offset1 < offset2 && !cont) { // Fill the buffer with transitions with the same action label tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSACT(act, tmp); // store transition entry THREADBUFFERGROUPPOS(GROUP_ID,0) = tmp; cont = 1; offset1++; bitmask = act; for (int j = 1; j < d_max_buf_ints; j++) { tmp = 0; if(offset1 < offset2 && act == bitmask) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSACT(bitmask, tmp); if (act == bitmask) { offset1++; } else { tmp = 0; } } THREADBUFFERGROUPPOS(GROUP_ID,j) = tmp; j++; } } int sync_act = act; if (__popc((__ballot(cont) >> (LANE - GROUP_ID)) & ((1 << d_nr_procs) - 1)) > 1) { // Find the smallest 'sync_act' with butterfly reduction for(int j = 1; j < d_nr_procs; j<<=1) { sync_act = min(__shfl(sync_act, GTL((GROUP_ID + j) % d_nr_procs)), sync_act); } } else { // Only one process with synchronizing transitions left, there will // be no more successors from this state cont = 0; offset1 = offset2; sync_act = 1 << d_bits_act; } // Now, we have obtained the info needed to combine process transitions sync_offset1 = sync_offset2 = 0; // Find out which processes have the smallest 'act' int proc_enabled = (__ballot(act == sync_act) >> (LANE - GROUP_ID)) & ((1 << d_nr_procs) - 1); // Only generate synchronizing successors if there are more that two processes with 'sync_act' enabled if(sync_act < (1 << d_bits_act) && (__popc(proc_enabled) >= 2)) { // syncbits Offset position i = sync_act/(INTSIZE/d_nbits_syncbits_offset); pos = sync_act - 
(i*(INTSIZE/d_nbits_syncbits_offset)); l = tex1Dfetch(tex_syncbits_offsets, i); GETSYNCOFFSET(sync_offset1, l, pos); pos++; if (pos == (INTSIZE/d_nbits_syncbits_offset)) { l = tex1Dfetch(tex_syncbits_offsets, i+1); pos = 0; } GETSYNCOFFSET(sync_offset2, l, pos); } // iterate through the relevant syncbit filters for (int j = GROUP_ID;__any(sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2);) { tmp = 0; // Keep searching the array with sync rules until we have found an applicable rule or we have reached the end // We don't need to check for THREADINGROUP, since sync_offset1 == sync_offset2 for threads outside a group while(!(tmp != 0 && (tmp & proc_enabled) == tmp) && sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2) { // Fetch the rule index = tex1Dfetch(tex_syncbits, sync_offset1 + j / (INTSIZE/d_nr_procs)); GETSYNCRULE(tmp, index, j % (INTSIZE/d_nr_procs)); // Increase the counter such that threads that have not found an applicable sync rule take a smaller step j += d_nr_procs - __popc((__ballot(tmp != 0 && (tmp & proc_enabled) == tmp) >> (LANE - GROUP_ID)) & ((1 << GROUP_ID) - 1)); } // Find the smallest index j for the next iteration // We don't need to check for THREADINGROUP because there is no thread // outside of a group with GROUP_ID == d_nr_procs - 1 if(j >= d_nr_procs - 1 && THREADGROUPCOUNTER < j) { atomicMax((inttype*) &THREADGROUPCOUNTER, j); } int work_remaining = 0; int has_second_succ = 0; // start combining entries in the buffer to create target states if (tmp != 0 && (tmp & proc_enabled) == tmp) { // source state is not a deadlock outtrans_enabled = 1; // copy src_state into tgt_state for (pos = 0; pos < d_sv_nints; pos++) { tgt_state[pos] = src_state[pos]; } // construct first successor for (int rule = tmp; rule;) { pos = __ffs(rule) - 1; // get first state GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, k-1); // Check if this buffer has a second state GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 1, pos); if(d_max_buf_ints > 1 && !k) { GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,1), 0, pos); } if(k) { has_second_succ |= 1 << pos; } rule &= ~(1 << pos); } work_remaining = 1 + has_second_succ; } // while we keep getting new states, store them while (__any(work_remaining)) { l = 0; if(work_remaining) { // check for violation of safety property, if required if (d_property == SAFETY) { GETSTATEVECTORSTATE(pos, tgt_state, d_nr_procs-1); if (pos == 1) { // error state found (*d_property_violation) = 1; } } // store tgt_state in cache; if i == d_shared_q_size, state was found, duplicate detected // if i == d_shared_q_size+1, cache is full, immediately store in global hash table l = STOREINCACHE(tgt_state, cache, &bitmask); if(work_remaining == 1) { // There will be no second successor work_remaining = 0; } } store_cache_overflow_warp(d_q, d_newstate_flags, l == 8); if(work_remaining) { // get next successor by finding the next combination from the buffer // Only look at processes that stored more than one successor in the buffer (has_second_succ) int rule; for (rule = has_second_succ; rule;) { pos = __ffs(rule) - 1; int curr_st; GETSTATEVECTORSTATE(curr_st, tgt_state, pos); int st = 0; int num_states_in_trans = NR_OF_STATES_IN_TRANSENTRY(pos); // We search for the position of the current state in the buffer // We don't have to compare the last position: if curr_st has not been found yet, // then it has to be in the last position for (k = 0; k < d_max_buf_ints * num_states_in_trans - 1; k++) { GETPROCTRANSSTATE(st, 
THREADBUFFERGROUPPOS(pos,k / num_states_in_trans), k % num_states_in_trans, pos); if (curr_st == (st-1) || st == 0) { break; } } // Try to get the next element k++; if (k < d_max_buf_ints * num_states_in_trans && st != 0) { // Retrieve next element, insert it in 'tgt_state' if it is not 0, and return result, otherwise continue GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,k / num_states_in_trans), k % num_states_in_trans, pos); if (st > 0) { SETSTATEVECTORSTATE(tgt_state, pos, st-1); break; } } // else, set this process state to first one, and continue to next process GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, st-1); rule &= ~(1 << pos); } // did we find a successor? if not, all successors have been generated if (rule == 0) { work_remaining = 0; } } } j = THREADINGROUP ? THREADGROUPCOUNTER + GROUP_ID + 1 : 0; } // only active threads should reset 'cont' if (cont && sync_act == act) { cont = 0; act = 1 << d_bits_act; THREADGROUPCOUNTER = 0; } } // have we encountered a deadlock state? // we use the shared memory to communicate this to the group leaders if (d_property == DEADLOCK) { if (THREADINGROUP) { if (ISSTATE(src_state)) { THREADBUFFERGROUPPOS(GROUP_ID, 0) = outtrans_enabled; // group leader collects results l = 0; if (GROUP_ID == 0) { for (i = 0; i < d_nr_procs; i++) { l += THREADBUFFERGROUPPOS(i, 0); } if (l == 0) { // deadlock state found (*d_property_violation) = 1; } } } } } int performed_work = OPENTILECOUNT != 0; __syncthreads(); // Reset the work tile count if (threadIdx.x == 0) { OPENTILECOUNT = 0; } __syncthreads(); // start scanning the local cache and write results to the global hash table if(performed_work) { copy_cache_to_global(d_q, cache, d_newstate_flags); } __syncthreads(); // Write empty state vector to part of the work tile that is not used if (threadIdx.x < OPENTILELEN - OPENTILECOUNT) { shared[OPENTILEOFFSET+OPENTILECOUNT+threadIdx.x] = EMPTYVECT32; } // Ready to start next iteration, if error has not occurred if (threadIdx.x == 0) { if (CONTINUE == 2) { (*d_contBFS) = 2; ITERATIONS = d_kernel_iters; } else { ITERATIONS++; } CONTINUE = 0; } __syncthreads(); } //Copy the work tile to global mem if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x] = shared[OPENTILEOFFSET+threadIdx.x]; } if(threadIdx.x == 0) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN+LASTSEARCHLEN] = OPENTILECOUNT; } } __global__ void __launch_bounds__(512, 2) gather_por(inttype *d_q, inttype *d_h, inttype *d_bits_state, inttype *d_firstbit_statevector, inttype *d_proc_offsets_start, inttype *d_proc_offsets, inttype *d_proc_trans, inttype *d_syncbits_offsets, inttype *d_syncbits, inttype *d_contBFS, inttype *d_property_violation, volatile inttype *d_newstate_flags, inttype *d_worktiles, inttype scan) { inttype i, k, l, index, offset1, offset2, tmp, cont, act, sync_offset1, sync_offset2; volatile inttype* src_state = &shared[OPENTILEOFFSET+d_sv_nints*GROUP_GID]; volatile inttype* tgt_state = &shared[TGTSTATEOFFSET+threadIdx.x*d_sv_nints]; inttype* cache = (inttype*) &shared[CACHEOFFSET]; inttype bitmask, bi, bj; int pos; int tbgs = THREADBUFFERGROUPSTART(threadIdx.x); // TODO: remove this inttype TMPVAR; // is at least one outgoing transition enabled for a given state (needed to detect deadlocks) inttype outtrans_enabled; // Locally store the state sizes and syncbits if (threadIdx.x < SH_OFFSET) { shared[threadIdx.x] = 0; } for (i = threadIdx.x; i < 
HASHCONSTANTSLEN; i += blockDim.x) { shared[i+HASHCONSTANTSOFFSET] = d_h[i]; } for (i = threadIdx.x; i < VECTORPOSLEN; i += blockDim.x) { VECTORSTATEPOS(i) = d_firstbit_statevector[i]; } for (i = threadIdx.x; i < LTSSTATESIZELEN; i += blockDim.x) { STATESIZE(i) = d_bits_state[i]; } // Clean the cache for (i = threadIdx.x; i < (d_shared_q_size - CACHEOFFSET); i += blockDim.x) { cache[i] = EMPTYVECT32; } if(scan) { // Copy the work tile from global mem if (threadIdx.x < OPENTILELEN + LASTSEARCHLEN) { shared[OPENTILEOFFSET+threadIdx.x] = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x]; } if(threadIdx.x == 0) { OPENTILECOUNT = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN + LASTSEARCHLEN]; } } else if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { // On first run: initialize the work tile to empty shared[OPENTILEOFFSET+threadIdx.x] = threadIdx.x < OPENTILELEN ? EMPTYVECT32 : 0; } __syncthreads(); while (ITERATIONS < d_kernel_iters) { if (threadIdx.x == 0 && OPENTILECOUNT < OPENTILELEN && d_newstate_flags[blockIdx.x]) { // Indicate that we are scanning d_newstate_flags[blockIdx.x] = 2; SCAN = 1; } __syncthreads(); // Scan the open set for work; we use the OPENTILECOUNT flag at this stage to count retrieved elements if (SCAN) { inttype last_search_location = shared[LASTSEARCHOFFSET + WARP_ID]; // This block should be able to find a new state int found_new_state = 0; for (i = GLOBAL_WARP_ID; i < d_nrbuckets && OPENTILECOUNT < OPENTILELEN; i += NR_WARPS) { int loc = i + last_search_location; if(loc >= d_nrbuckets) { last_search_location = -i + GLOBAL_WARP_ID; loc = i + last_search_location; } tmp = d_q[loc*WARPSIZE+LANE]; l = EMPTYVECT32; if (ENTRY_ID == (d_sv_nints-1)) { if (ISNEWINT(tmp)) { found_new_state = 1; // try to increment the OPENTILECOUNT counter, if successful, store the state l = atomicAdd((uint32_t *) &OPENTILECOUNT, d_sv_nints); if (l < OPENTILELEN) { d_q[loc*WARPSIZE+LANE] = OLDINT(tmp); } } } // all threads read the OPENTILECOUNT value of the 'tail' thread, and possibly store their part of the vector in the shared memory if (LANEPOINTSTOVALIDBUCKETPOS) { l = __shfl(l, LANE-ENTRY_ID+d_sv_nints-1); if (l < OPENTILELEN) { // write part of vector to shared memory shared[OPENTILEOFFSET+l+ENTRY_ID] = tmp; } } } if(i < d_nrbuckets) { last_search_location = i - GLOBAL_WARP_ID; } else { last_search_location = 0; } if(LANE == 0) { shared[LASTSEARCHOFFSET + WARP_ID] = last_search_location; } if(found_new_state || i < d_nrbuckets) { WORKSCANRESULT = 1; } } __syncthreads(); // if work has been retrieved, indicate this if (threadIdx.x == 0) { if (OPENTILECOUNT > 0) { (*d_contBFS) = 1; } if(SCAN && WORKSCANRESULT == 0 && d_newstate_flags[blockIdx.x] == 2) { // Scanning has completed and no new states were found by this block, // save this information to prevent unnecessary scanning later on d_newstate_flags[blockIdx.x] = 0; } else { WORKSCANRESULT = 0; } scan = 0; } // is the thread part of an 'active' group? offset1 = 0; offset2 = 0; // Reset the whole thread buffer (shared + private) int start = THREADBUFFEROFFSET; int end = THREADBUFFEROFFSET + THREADBUFFERLEN; for(i = start + threadIdx.x; i < end; i+=blockDim.x) { shared[i] = 0; } if (THREADINGROUP) { act = 1 << d_bits_act; // Is there work? 
if (ISSTATE(src_state)) { // Gather the required transition information for all states in the tile i = tex1Dfetch(tex_proc_offsets_start, GROUP_ID); // Determine process state GETSTATEVECTORSTATE(cont, src_state, GROUP_ID); // Offset position index = cont/(INTSIZE/d_nbits_offset); pos = cont - (index*(INTSIZE/d_nbits_offset)); tmp = tex1Dfetch(tex_proc_offsets, i+index); GETTRANSOFFSET(offset1, tmp, pos); if (pos == (INTSIZE/d_nbits_offset)-1) { tmp = tex1Dfetch(tex_proc_offsets, i+index+1); GETTRANSOFFSET(offset2, tmp, 0); } else { GETTRANSOFFSET(offset2, tmp, pos+1); } } if (GROUP_ID == 0) { THREADGROUPPOR = 0; } } // iterate over the outgoing transitions of state 'cont' // variable cont is reused to indicate whether the buffer content of this thread still needs processing cont = 0; // while there is work to be done outtrans_enabled = 0; char generate = 1; char proviso_satisfied = 0; int cluster_trans = 1 << GROUP_ID; int orig_offset1 = offset1; while(generate > -1) { while (CONTINUE != 2 && __any(offset1 < offset2 || cont)) { if (offset1 < offset2 && !cont) { // reset act act = (1 << (d_bits_act)); // reset buffer of this thread for (l = 0; l < d_max_buf_ints; l++) { THREADBUFFERGROUPPOS(GROUP_ID, l) = 0; } } // if not sync, store in hash table // loop over all transentries while (1) { i = 1; if(offset1 < offset2 && !cont) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSSYNC(i, tmp); } if (__any(i == 0)) { if(i == 0) { // no deadlock outtrans_enabled = 1; // construct state for (l = 0; l < d_sv_nints; l++) { tgt_state[l] = src_state[l]; } offset1++; } // loop over this transentry for (l = 0; __any(i == 0 && l < NR_OF_STATES_IN_TRANSENTRY(GROUP_ID)); l++) { if(i == 0) { GETPROCTRANSSTATE(pos, tmp, l, GROUP_ID); if (pos > 0) { SETSTATEVECTORSTATE(tgt_state, GROUP_ID, pos-1); // check for violation of safety property, if required if (d_property == SAFETY) { if (GROUP_ID == d_nr_procs-1) { // pos contains state id + 1 // error state is state 1 if (pos == 2) { // error state found (*d_property_violation) = 1; } } } if (!d_check_cycle_proviso) { // Set proviso to 1 to indicate at least one state has been found proviso_satisfied = 1; } // store tgt_state in cache // if k == 8, cache is full, immediately store in global hash table if(generate == 1) { k = STOREINCACHE(tgt_state, cache, &bi); if(k >> 2) { proviso_satisfied |= (k >> 1) & 1; } else if (!d_check_cycle_proviso) { SETPORSTATE(&cache[bi]); } } else { MARKINCACHE(tgt_state, cache, (THREADGROUPPOR >> GROUP_ID) & 1); } } else { i = 1; } } store_cache_overflow_warp(d_q, d_newstate_flags, i == 0 && k == 8); int c; // Check cycle proviso with the whole warp while(generate && d_check_cycle_proviso && (c = __ballot(i == 0 && (k >> 2 == 0)))) { int active_lane = __ffs(c) - 1; int cache_index = __shfl(bi, active_lane); bj = FIND_WARP((inttype*) &cache[cache_index], d_q); if(LANE == active_lane) { i = 1; if(bj == 0) { proviso_satisfied = 1; } } } } } else { break; } } // i is the current relative position in the buffer for this thread i = 0; if (offset1 < offset2 && !cont) { GETPROCTRANSACT(act, tmp); // store transition entry THREADBUFFERGROUPPOS(GROUP_ID,i) = tmp; cont = 1; i++; offset1++; while (offset1 < offset2) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSACT(bitmask, tmp); if (act == bitmask) { THREADBUFFERGROUPPOS(GROUP_ID,i) = tmp; i++; offset1++; } else { break; } } } int sync_act = cont ? 
act : (1 << d_bits_act); for(i = 1; i < d_nr_procs; i<<=1) { sync_act = min(__shfl(sync_act, GTL((GROUP_ID + i) % d_nr_procs)), sync_act); } // Now, we have obtained the info needed to combine process transitions sync_offset1 = sync_offset2 = 0; int proc_enabled = (__ballot(act == sync_act) >> (LANE - GROUP_ID)) & ((1 << d_nr_procs) - 1); if(THREADINGROUP && sync_act < (1 << d_bits_act)) { // syncbits Offset position i = sync_act/(INTSIZE/d_nbits_syncbits_offset); pos = sync_act - (i*(INTSIZE/d_nbits_syncbits_offset)); l = tex1Dfetch(tex_syncbits_offsets, i); GETSYNCOFFSET(sync_offset1, l, pos); if (pos == (INTSIZE/d_nbits_syncbits_offset)-1) { l = tex1Dfetch(tex_syncbits_offsets, i+1); GETSYNCOFFSET(sync_offset2, l, 0); } else { GETSYNCOFFSET(sync_offset2, l, pos+1); } } // iterate through the relevant syncbit filters tmp = 1; for (int j = GROUP_ID;__any(sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2 && tmp); j+=d_nr_procs) { index = 0; if(THREADINGROUP && sync_act < (1 << d_bits_act) && sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2 && tmp) { index = tex1Dfetch(tex_syncbits, sync_offset1 + j / (INTSIZE/d_nr_procs)); } SETOLDSTATE(tgt_state); int has_second_succ = 0; GETSYNCRULE(tmp, index, j % (INTSIZE/d_nr_procs)); if (tmp != 0 && (tmp & proc_enabled) == tmp) { // source state is not a deadlock outtrans_enabled = 1; // start combining entries in the buffer to create target states // if sync rule applicable, construct the first successor // copy src_state into tgt_state for (pos = 0; pos < d_sv_nints; pos++) { tgt_state[pos] = src_state[pos]; } // construct first successor for (int rule = tmp; rule;) { pos = __ffs(rule) - 1; // get first state GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, k-1); GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 1, pos); has_second_succ |= k; if(d_max_buf_ints > 1 && !k) { GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,1), 0, pos); has_second_succ |= k; } rule &= ~(1 << pos); } SETNEWSTATE(tgt_state); } int rule_proviso = 0; // while we keep getting new states, store them while (__any(ISNEWSTATE(tgt_state))) { l = k = TMPVAR = bitmask = 0; if(ISNEWSTATE(tgt_state)) { // check for violation of safety property, if required if (d_property == SAFETY) { GETSTATEVECTORSTATE(pos, tgt_state, d_nr_procs-1); if (pos == 1) { // error state found (*d_property_violation) = 1; } } if (!d_check_cycle_proviso) { // Set rule_proviso to 1 to indicate at least one state has been found rule_proviso = 1; } // store tgt_state in cache; if i == d_shared_q_size, state was found, duplicate detected // if i == d_shared_q_size+1, cache is full, immediately store in global hash table if(generate == 1) { TMPVAR = STOREINCACHE(tgt_state, cache, &bitmask); if(TMPVAR >> 2) { rule_proviso |= (TMPVAR >> 1) & 1; } else if (!d_check_cycle_proviso) { SETPORSTATE(&cache[bitmask]); } } else { MARKINCACHE(tgt_state, cache, (THREADGROUPPOR & tmp) == tmp); } l = 1; k = has_second_succ; if(!has_second_succ) { SETOLDSTATE(tgt_state); } } store_cache_overflow_warp(d_q, d_newstate_flags, l && TMPVAR == 8); int c; // Check cycle proviso with the whole warp while(generate && d_check_cycle_proviso && (c = __ballot(l && (TMPVAR >> 2 == 0)))) { int active_lane = __ffs(c) - 1; int cache_index = __shfl(bitmask, active_lane); bj = FIND_WARP((inttype*) &cache[cache_index], d_q); if(LANE == active_lane) { l = 0; if(bj == 0) { rule_proviso = 1; } } } if(k) { // get next successor int rule; for (rule = tmp; rule;) { pos = __ffs(rule) - 1; int 
curr_st; GETSTATEVECTORSTATE(curr_st, tgt_state, pos); int st = 0; for (k = 0; k < d_max_buf_ints; k++) { for (l = 0; l < NR_OF_STATES_IN_TRANSENTRY(pos); l++) { GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,k), l, pos); if (curr_st == (st-1)) { break; } } if (curr_st == (st-1)) { break; } } // Assumption: element has been found (otherwise, 'last' was not a valid successor) // Try to get the next element if (l == NR_OF_STATES_IN_TRANSENTRY(pos) - 1) { if (k >= d_max_buf_ints-1) { st = 0; } else { k++; l = 0; } } else { l++; } // Retrieve next element, insert it in 'tgt_state' if it is not 0, and return result, otherwise continue if (st != 0) { GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,k), l, pos); if (st > 0) { SETSTATEVECTORSTATE(tgt_state, pos, st-1); SETNEWSTATE(tgt_state); break; } } // else, set this process state to first one, and continue to next process GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, st-1); rule &= ~(1 << pos); } // did we find a successor? if not, set tgt_state to old if (rule == 0) { SETOLDSTATE(tgt_state); } } } for (l = 0; l < d_nr_procs; l++) { // Exchange the sync rules so every thread can update its cluster_trans int sync_rule = __shfl(tmp, GTL((GROUP_ID + l) % d_nr_procs)); int proviso = __shfl(rule_proviso, GTL((GROUP_ID + l) % d_nr_procs)); if(GETBIT(GROUP_ID, sync_rule) && sync_act == act) { cluster_trans |= sync_rule; proviso_satisfied |= proviso; } } } // only active threads should reset 'cont' if (cont && sync_act == act) { cont = 0; } } // END WHILE CONTINUE == 1 if(generate == 1 && THREADINGROUP) { // Choose a cluster for reduction if(!proviso_satisfied) { cluster_trans = cluster_trans & ~(1 << GROUP_ID); } THREADBUFFERGROUPPOS(GROUP_ID,0) = cluster_trans; __syncthreads(); proviso_satisfied = 0; int to_check = cluster_trans; while (to_check) { i = __ffs(to_check) - 1; to_check &= ~(1 << i); int cluster = THREADBUFFERGROUPPOS(i, 0); proviso_satisfied |= GETBIT(i, cluster); to_check |= cluster & ~cluster_trans & ~(1 << i); cluster_trans |= cluster; } __syncthreads(); if(!proviso_satisfied) { THREADBUFFERGROUPPOS(GROUP_ID,0) = 0; } else { THREADBUFFERGROUPPOS(GROUP_ID,0) = cluster_trans; } __syncthreads(); if(GROUP_ID == 0) { int min = d_nr_procs; int cluster = 0xFFFFFFFF >> (INTSIZE - d_nr_procs); for(i = 0; i < d_nr_procs; i++) { if(THREADBUFFERGROUPPOS(i,0) > 0 && __popc(THREADBUFFERGROUPPOS(i,0)) < min) { min = __popc(THREADBUFFERGROUPPOS(i,0)); cluster = THREADBUFFERGROUPPOS(i,0); } } THREADGROUPPOR = cluster; if(cluster < (0xFFFFFFFF >> (INTSIZE - d_nr_procs))) { // printf("Selected cluster %d for POR\n",cluster); } } __syncthreads(); } offset1 = orig_offset1; generate--; } // END while(generate > -1) // have we encountered a deadlock state? 
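// (i.e., no thread in the group recorded an enabled outgoing transition for src_state)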
// we use the shared memory to communicate this to the group leaders if (d_property == DEADLOCK) { if (THREADINGROUP) { if (ISSTATE(src_state)) { THREADBUFFERGROUPPOS(GROUP_ID, 0) = outtrans_enabled; // group leader collects results l = 0; if (GROUP_ID == 0) { for (i = 0; i < d_nr_procs; i++) { l += THREADBUFFERGROUPPOS(i, 0); } if (l == 0) { // deadlock state found (*d_property_violation) = 1; } } } } } int performed_work = OPENTILECOUNT != 0; __syncthreads(); // Reset the open queue tile if (threadIdx.x < OPENTILELEN) { shared[OPENTILEOFFSET+threadIdx.x] = EMPTYVECT32; } if (threadIdx.x == 0) { OPENTILECOUNT = 0; } __syncthreads(); // start scanning the local cache and write results to the global hash table if(performed_work) { copy_cache_to_global(d_q, cache, d_newstate_flags); } __syncthreads(); // Ready to start next iteration, if error has not occurred if (threadIdx.x == 0) { if (CONTINUE == 2) { (*d_contBFS) = 2; ITERATIONS = d_kernel_iters; } else { ITERATIONS++; } CONTINUE = 0; } __syncthreads(); } //Copy the work tile to global mem if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x] = shared[OPENTILEOFFSET+threadIdx.x]; } if(threadIdx.x == 0) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN+LASTSEARCHLEN] = OPENTILECOUNT; } } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char** argv) { FILE *fp; inttype nr_procs, bits_act, bits_statevector, sv_nints, nr_trans, proc_nrstates, nbits_offset, max_buf_ints, nr_syncbits_offsets, nr_syncbits, nbits_syncbits_offset; inttype *bits_state, *firstbit_statevector, *proc_offsets, *proc_trans, *proc_offsets_start, *syncbits_offsets, *syncbits; inttype contBFS, counted_states; char stmp[BUFFERSIZE], fn[BUFFERSIZE]; // to store constants for closed set hash functions int h[NR_HASH_FUNCTIONS*2]; // size of global hash table size_t q_size = 0; PropertyStatus check_property = NONE; // nr of iterations in single kernel run int kernel_iters = KERNEL_ITERS; int nblocks = NR_OF_BLOCKS; int nthreadsperblock = BLOCK_SIZE; // POR options int apply_por = 0; int use_cycle_proviso = 0; // level of verbosity (1=print level progress) int verbosity = 0; char* dump_file = NULL; // clock to measure time clock_t start, stop; double runtime = 0.0; // Start timer assert((start = clock())!=-1); hipDeviceProp_t prop; int nDevices; // GPU side versions of the input inttype *d_bits_state, *d_firstbit_statevector, *d_proc_offsets_start, *d_proc_offsets, *d_proc_trans, *d_syncbits_offsets, *d_syncbits, *d_h; // flag to keep track of progress and whether hash table errors occurred (value==2) inttype *d_contBFS; // flags to track which blocks have new states inttype *d_newstate_flags; // flag to keep track of property verification outcome inttype *d_property_violation; // Integer to store the amount of states counted in the hash table inttype *d_counted_states; // Space to temporarily store work tiles inttype *d_worktiles; // GPU datastructures for calculation inttype *d_q; const char* help_text = "Usage: GPUexplore <model> [OPTIONS]\n" "Run state-space exploration on model (do not include the file extension).\n" "options:\n" " -d Check for deadlocks\n" " -p Check a safety property (should be embedded in the model)\n" " --por Apply partial-order reduction\n" " --cycle-proviso Apply the cycle proviso during partial-order reduction\n" " -k NUM Run NUM iterations per kernel launch (default 1)\n" " -b NUM Run the kernel on NUM blocks 
(default 1)\n" " -t NUM Use NUM threads per block (default 32)\n" " -q NUM Allocate NUM integers for the hash table\n" " --dump FILE Dump the state space to FILE after completing the exploration\n" " -v NUM Change the verbosity:\n" " 0 - minimal output\n" " 1 - print sequence number of each kernel launch\n" " 2 - print number of states in the hash table after each kernel launch\n" " 3 - print state vectors after each kernel launch\n" " -h, --help Show this help message\n"; if (argc == 1) { fprintf(stderr, "ERROR: No input network given!\n"); fprintf(stdout, help_text); exit(1); } else if(!strcmp(argv[1],"--help") || !strcmp(argv[1],"-h") || !strcmp(argv[1],"-?")) { fprintf(stdout, help_text); exit(0); } strcpy(fn, argv[1]); strcat(fn, ".gpf"); int i = 2; while (i < argc) { if (!strcmp(argv[i],"--help") || !strcmp(argv[i],"-h") || !strcmp(argv[i],"-?")) { fprintf(stdout, help_text); exit(0); } else if (!strcmp(argv[i],"-k")) { // if nr. of iterations per kernel run is given, store it kernel_iters = atoi(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-b")) { // store nr of blocks to be used nblocks = atoi(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-t")) { // store nr of threads per block to be used nthreadsperblock = atoi(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-q")) { // store hash table size q_size = atoll(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-v")) { // store verbosity level verbosity = atoi(argv[i+1]); if (verbosity > 3) { verbosity = 3; } i += 2; } else if (!strcmp(argv[i],"-d")) { // check for deadlocks check_property = DEADLOCK; use_cycle_proviso = 0; i += 1; } else if (!strcmp(argv[i],"-p")) { // check a property check_property = SAFETY; use_cycle_proviso = 1; i += 1; } else if (!strcmp(argv[i],"--por")) { // apply partial-order reduction apply_por = 1; i += 1; } else if (!strcmp(argv[i],"--cycle-proviso")) { // use cycle proviso if (check_property == NONE) { use_cycle_proviso = 1; } i += 1; } else if (!strcmp(argv[i],"--dump")) { dump_file = argv[i+1]; i += 2; } else { fprintf(stderr, "ERROR: unrecognized option %s\n", argv[i]); fprintf(stdout, help_text); exit(1); } } fp = fopen(fn, "r"); if (fp) { // Read the input if (fgets(stmp, BUFFERSIZE, fp) != NULL && check_property == SAFETY) { i = atoi(stmp); fprintf(stdout, "Property to check is "); if (i == 0) { fprintf(stdout, "not "); } fprintf(stdout, "a liveness property\n"); if (i == 1) { check_property = LIVENESS; } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_procs = atoi(stmp); fprintf(stdout, "nr of procs: %d\n", nr_procs); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { bits_act = atoi(stmp); fprintf(stdout, "nr of bits for transition label: %d\n", bits_act); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_nrstates = atoi(stmp); fprintf(stdout, "min. nr. of proc. 
states that fit in 32-bit integer: %d\n", proc_nrstates); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { bits_statevector = atoi(stmp) + apply_por; fprintf(stdout, "number of bits needed for a state vector: %d\n", bits_statevector); } firstbit_statevector = (inttype*) malloc(sizeof(inttype)*(nr_procs+1)); for (int i = 0; i <= nr_procs; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { firstbit_statevector[i] = atoi(stmp); fprintf(stdout, "statevector offset %d: %d\n", i, firstbit_statevector[i]); } } // determine the number of integers needed for a state vector sv_nints = (bits_statevector+31) / INTSIZE; bits_state = (inttype*) malloc(sizeof(inttype)*nr_procs); for (int i = 0; i < nr_procs; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { bits_state[i] = atoi(stmp); fprintf(stdout, "bits for states of process LTS %d: %d\n", i, bits_state[i]); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nbits_offset = atoi(stmp); fprintf(stdout, "size of offset in process LTSs: %d\n", nbits_offset); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { max_buf_ints = atoi(stmp); fprintf(stdout, "maximum label-bounded branching factor: %d\n", max_buf_ints); } proc_offsets_start = (inttype*) malloc(sizeof(inttype)*(nr_procs+1)); for (int i = 0; i <= nr_procs; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_offsets_start[i] = atoi(stmp); } } proc_offsets = (inttype*) malloc(sizeof(inttype)*proc_offsets_start[nr_procs]); for (int i = 0; i < proc_offsets_start[nr_procs]; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_offsets[i] = atoi(stmp); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_trans = atoi(stmp); fprintf(stdout, "total number of transition entries in network: %d\n", nr_trans); } proc_trans = (inttype*) malloc(sizeof(inttype)*nr_trans); for (int i = 0; i < nr_trans; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_trans[i] = atoi(stmp); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nbits_syncbits_offset = atoi(stmp); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_syncbits_offsets = atoi(stmp); } syncbits_offsets = (inttype*) malloc(sizeof(inttype)*nr_syncbits_offsets); for (int i = 0; i < nr_syncbits_offsets; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { syncbits_offsets[i] = atoi(stmp); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_syncbits = atoi(stmp); } syncbits = (inttype*) malloc(sizeof(inttype)*nr_syncbits); for (int i = 0; i < nr_syncbits; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { syncbits[i] = atoi(stmp); } } } else { fprintf(stderr, "ERROR: input network does not exist!\n"); exit(1); } // Randomly define the closed set hash functions srand(time(NULL)); for (int i = 0; i < NR_HASH_FUNCTIONS*2; i++) { h[i] = rand(); } // continue flags contBFS = 1; // Query the device properties and determine data structure sizes hipGetDeviceCount(&nDevices); if (nDevices == 0) { fprintf (stderr, "ERROR: No CUDA compatible GPU detected!\n"); exit(1); } hipGetDeviceProperties(&prop, 0); fprintf (stdout, "global mem: %lu\n", (uint64_t) prop.totalGlobalMem); fprintf (stdout, "shared mem per block: %d\n", (int) prop.sharedMemPerBlock); fprintf (stdout, "shared mem per SM: %d\n", (int) prop.sharedMemPerMultiprocessor); fprintf (stdout, "max. threads per block: %d\n", (int) prop.maxThreadsPerBlock); fprintf (stdout, "max. grid size: %d\n", (int) prop.maxGridSize[0]); fprintf (stdout, "nr. 
of multiprocessors: %d\n", (int) prop.multiProcessorCount); // determine actual nr of blocks nblocks = MAX(1,MIN(prop.maxGridSize[0],nblocks)); // Allocate memory on GPU cudaMallocCount((void **) &d_contBFS, sizeof(inttype)); cudaMallocCount((void **) &d_property_violation, sizeof(inttype)); cudaMallocCount((void **) &d_counted_states, sizeof(inttype)); cudaMallocCount((void **) &d_h, NR_HASH_FUNCTIONS*2*sizeof(inttype)); cudaMallocCount((void **) &d_bits_state, nr_procs*sizeof(inttype)); cudaMallocCount((void **) &d_firstbit_statevector, (nr_procs+1)*sizeof(inttype)); cudaMallocCount((void **) &d_proc_offsets_start, (nr_procs+1)*sizeof(inttype)); cudaMallocCount((void **) &d_proc_offsets, proc_offsets_start[nr_procs]*sizeof(inttype)); cudaMallocCount((void **) &d_proc_trans, nr_trans*sizeof(inttype)); cudaMallocCount((void **) &d_syncbits_offsets, nr_syncbits_offsets*sizeof(inttype)); cudaMallocCount((void **) &d_syncbits, nr_syncbits*sizeof(inttype)); cudaMallocCount((void **) &d_newstate_flags, nblocks*sizeof(inttype)); cudaMallocCount((void **) &d_worktiles, nblocks * (sv_nints*(nthreadsperblock/nr_procs)+nthreadsperblock/WARPSIZE+1)*sizeof(inttype)); // Copy data to GPU CUDA_CHECK_RETURN(hipMemcpy(d_contBFS, &contBFS, sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_h, h, NR_HASH_FUNCTIONS*2*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_bits_state, bits_state, nr_procs*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_firstbit_statevector, firstbit_statevector, (nr_procs+1)*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_proc_offsets_start, proc_offsets_start, (nr_procs+1)*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_proc_offsets, proc_offsets, proc_offsets_start[nr_procs]*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_proc_trans, proc_trans, nr_trans*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_syncbits_offsets, syncbits_offsets, nr_syncbits_offsets*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemcpy(d_syncbits, syncbits, nr_syncbits*sizeof(inttype), hipMemcpyHostToDevice)) CUDA_CHECK_RETURN(hipMemset(d_newstate_flags, 0, nblocks*sizeof(inttype))); CUDA_CHECK_RETURN(hipMemset(d_worktiles, 0, nblocks * (sv_nints*(nthreadsperblock/nr_procs)+nthreadsperblock/WARPSIZE+1)*sizeof(inttype))); CUDA_CHECK_RETURN(hipMemset(d_counted_states, 0, sizeof(inttype))); // Bind data to textures hipBindTexture(NULL, tex_proc_offsets_start, d_proc_offsets_start, (nr_procs+1)*sizeof(inttype)); hipBindTexture(NULL, tex_proc_offsets, d_proc_offsets, proc_offsets_start[nr_procs]*sizeof(inttype)); hipBindTexture(NULL, tex_proc_trans, d_proc_trans, nr_trans*sizeof(inttype)); hipBindTexture(NULL, tex_syncbits_offsets, d_syncbits_offsets, nr_syncbits_offsets*sizeof(inttype)); hipBindTexture(NULL, tex_syncbits, d_syncbits, nr_syncbits*sizeof(inttype)); size_t available, total; hipMemGetInfo(&available, &total); if (q_size == 0) { q_size = total / sizeof(inttype); } size_t el_per_Mb = Mb / sizeof(inttype); while(hipMalloc((void**)&d_q, q_size * sizeof(inttype)) == hipErrorMemoryAllocation) { q_size -= el_per_Mb; if( q_size < el_per_Mb) { // signal no free memory break; } } fprintf (stdout, "global mem queue size: %lu, number of entries: %lu\n", q_size*sizeof(inttype), (indextype) q_size); inttype shared_q_size = (int) prop.sharedMemPerMultiprocessor / sizeof(inttype) / 2; fprintf (stdout, "shared mem queue size: %lu, number of 
entries: %u\n", shared_q_size*sizeof(inttype), shared_q_size); fprintf (stdout, "nr. of blocks: %d, block size: %d, nr of kernel iterations: %d\n", nblocks, nthreadsperblock, kernel_iters); // copy symbols inttype tablesize = q_size; inttype nrbuckets = tablesize / WARPSIZE; hipMemcpyToSymbol(d_nrbuckets, &nrbuckets, sizeof(inttype)); hipMemcpyToSymbol(d_shared_q_size, &shared_q_size, sizeof(inttype)); hipMemcpyToSymbol(d_nr_procs, &nr_procs, sizeof(inttype)); hipMemcpyToSymbol(d_max_buf_ints, &max_buf_ints, sizeof(inttype)); hipMemcpyToSymbol(d_sv_nints, &sv_nints, sizeof(inttype)); hipMemcpyToSymbol(d_bits_act, &bits_act, sizeof(inttype)); hipMemcpyToSymbol(d_nbits_offset, &nbits_offset, sizeof(inttype)); hipMemcpyToSymbol(d_nbits_syncbits_offset, &nbits_syncbits_offset, sizeof(inttype)); hipMemcpyToSymbol(d_kernel_iters, &kernel_iters, sizeof(inttype)); hipMemcpyToSymbol(d_property, &check_property, sizeof(inttype)); hipMemcpyToSymbol(d_apply_por, &apply_por, sizeof(inttype)); hipMemcpyToSymbol(d_check_cycle_proviso, &use_cycle_proviso, sizeof(inttype)); // init the hash table hipLaunchKernelGGL(( init_queue), dim3(nblocks), dim3(nthreadsperblock), 0, 0, d_q, q_size); hipLaunchKernelGGL(( store_initial), dim3(1),dim3(1), 0, 0, d_q, d_h, d_newstate_flags,nthreadsperblock,nblocks); for (int i = 0; i < 2*NR_HASH_FUNCTIONS; i++) { fprintf (stdout, "hash constant %d: %d\n", i, h[i]); } FIRSTHASHHOST(i); fprintf (stdout, "hash of initial state: %d\n", i); inttype zero = 0; inttype *q_test = (inttype*) malloc(sizeof(inttype)*tablesize); int j = 0; inttype scan = 0; CUDA_CHECK_RETURN(hipMemcpy(d_property_violation, &zero, sizeof(inttype), hipMemcpyHostToDevice)) inttype property_violation = 0; clock_t exploration_start; assert((exploration_start = clock())!=-1); while (contBFS == 1) { CUDA_CHECK_RETURN(hipMemcpy(d_contBFS, &zero, sizeof(inttype), hipMemcpyHostToDevice)) if(apply_por) { hipLaunchKernelGGL(( gather_por), dim3(nblocks), dim3(nthreadsperblock), shared_q_size*sizeof(inttype), 0, d_q, d_h, d_bits_state, d_firstbit_statevector, d_proc_offsets_start, d_proc_offsets, d_proc_trans, d_syncbits_offsets, d_syncbits, d_contBFS, d_property_violation, d_newstate_flags, d_worktiles, scan); } else { hipLaunchKernelGGL(( gather), dim3(nblocks), dim3(nthreadsperblock), shared_q_size*sizeof(inttype), 0, d_q, d_h, d_bits_state, d_firstbit_statevector, d_contBFS, d_property_violation, d_newstate_flags, d_worktiles, scan); } // copy progress result //CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipMemcpy(&contBFS, d_contBFS, sizeof(inttype), hipMemcpyDeviceToHost)) if (check_property > 0) { CUDA_CHECK_RETURN(hipMemcpy(&property_violation, d_property_violation, sizeof(inttype), hipMemcpyDeviceToHost)) if (property_violation == 1) { contBFS = 0; } } if (verbosity > 0) { if (verbosity == 1) { printf ("%d\n", j++); } else if (verbosity == 2) { hipMemcpy(q_test, d_q, tablesize*sizeof(inttype), hipMemcpyDeviceToHost); count_local_queue(q_test, tablesize, firstbit_statevector, nr_procs, sv_nints); } else if (verbosity == 3) { hipMemcpy(q_test, d_q, tablesize*sizeof(inttype), hipMemcpyDeviceToHost); print_local_queue(stdout, q_test, tablesize, firstbit_statevector, nr_procs, sv_nints, apply_por); } } scan = 1; } // determine runtime stop = clock(); runtime = (double) (stop-start)/CLOCKS_PER_SEC; fprintf (stdout, "Run time: %f\n", runtime); runtime = (double) (stop-exploration_start)/CLOCKS_PER_SEC; fprintf(stdout, "Exploration time %f\n", runtime); if 
(property_violation == 1) {
		switch (check_property) {
			case DEADLOCK:
				printf ("deadlock detected!\n");
				break;
			case SAFETY:
				printf ("safety property violation detected!\n");
				break;
			case LIVENESS:
				printf ("liveness property violation detected!\n");
				break;
		}
	}
	// report error if required
	if (contBFS == 2) {
		fprintf (stderr, "ERROR: problem with hash table\n");
	}
	CUDA_CHECK_RETURN(hipMemset(d_counted_states, 0, sizeof(inttype)));
	hipLaunchKernelGGL(( count_states), dim3(((int) prop.multiProcessorCount)*8), dim3(512), 1, 0, d_q, d_counted_states);
	CUDA_CHECK_RETURN(hipDeviceSynchronize());
	CUDA_CHECK_RETURN(hipMemcpy(&counted_states, d_counted_states, sizeof(inttype), hipMemcpyDeviceToHost));
	fprintf (stdout, "nr. of states in hash table: %d\n", counted_states);
	// Debugging functionality: print states to file
	if(dump_file) {
		FILE* fout;
		if((fout = fopen(dump_file, "w")) != NULL) {
			fprintf(stdout, "Dumping state space to file...\n");
			hipMemcpy(q_test, d_q, tablesize*sizeof(inttype), hipMemcpyDeviceToHost);
			print_local_queue(fout, q_test, tablesize, firstbit_statevector, nr_procs, sv_nints, apply_por);
			fclose(fout);
		} else {
			fprintf(stderr, "Could not open file to dump the state space\n");
		}
	}
	return 0;
}
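Both the HIP translation above and the CUDA original below lean on the same warp-cooperative idiom (see store_cache_overflow_warp and copy_cache_to_global): a ballot collects which lanes still hold pending work, __ffs picks the lowest such lane, and the whole warp then cooperates on that lane's element before the lane clears its flag. The stand-alone sketch below illustrates only that idiom and is not part of either file; the kernel name warp_serialize_demo and the process_one helper are invented for the example, and it uses the CUDA 9 *_sync warp intrinsics, whereas GPUexplore itself relies on the legacy __ballot/__any/__shfl forms.

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for a warp-wide operation such as FINDORPUT_WARP: all lanes of the warp
// call it for the element owned by 'active_lane'. Here it trivially succeeds.
__device__ int process_one(int active_lane)
{
    (void) active_lane;
    return 1;
}

// Each lane may or may not have a pending element; the warp serializes over the
// pending lanes so that all 32 threads can cooperate on one element at a time.
__global__ void warp_serialize_demo(const int *has_work, int *processed)
{
    const unsigned lane = threadIdx.x % 32;
    int pending = has_work[threadIdx.x];

    unsigned mask;
    while ((mask = __ballot_sync(0xFFFFFFFFu, pending)) != 0) {
        int active_lane = __ffs(mask) - 1;   // lowest lane that still has work
        int ok = process_one(active_lane);   // the whole warp helps this lane
        if (lane == (unsigned) active_lane) {
            processed[threadIdx.x] = ok;     // record the result for this lane
            pending = 0;                     // this lane's element is done
        }
    }
}

int main()
{
    const int n = 32;
    int h_has[32], h_done[32];
    for (int i = 0; i < n; i++) h_has[i] = (i % 3 == 0);  // only some lanes have work

    int *d_has, *d_done;
    cudaMalloc(&d_has, n * sizeof(int));
    cudaMalloc(&d_done, n * sizeof(int));
    cudaMemcpy(d_has, h_has, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_done, 0, n * sizeof(int));

    warp_serialize_demo<<<1, n>>>(d_has, d_done);
    cudaMemcpy(h_done, d_done, n * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++) printf("%d", h_done[i]);
    printf("\n");

    cudaFree(d_has);
    cudaFree(d_done);
    return 0;
}

Serializing this way keeps every hash-table probe fully warp-wide, which is what FINDORPUT_WARP requires, at the cost of handling pending elements one at a time.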
cab6d4114d28c18918168452a4f26a4bb10b2f69.cu
/* ============================================================================ Name : GPUexplore.cu Author : Anton Wijs and Thomas Neele Version : Copyright : Copyright Anton Wijs and Thomas Neele Description : CUDA GPUexplore: On the fly state space analysis ============================================================================ */ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <cuda.h> #include <cuda_runtime_api.h> #include <assert.h> #include <time.h> #include <math.h> // type of elements used #define inttype uint32_t // type of indices in hash table #define indextype uint64_t enum BucketEntryStatus { EMPTY, TAKEN, FOUND }; enum PropertyStatus { NONE, DEADLOCK, SAFETY, LIVENESS }; #define MIN(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a < _b ? _a : _b; }) #define MAX(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a > _b ? _a : _b; }) // Nr of tiles processed in single kernel launch //#define TILEITERS 10 static const int WARPSIZE = 32; static const int HALFWARPSIZE = 16; static const int INTSIZE = 32; static const int BUFFERSIZE = 256; // GPU constants __constant__ inttype d_nrbuckets; __constant__ inttype d_shared_q_size; __constant__ inttype d_nr_procs; __constant__ inttype d_max_buf_ints; __constant__ inttype d_sv_nints; __constant__ inttype d_bits_act; __constant__ inttype d_nbits_offset; __constant__ inttype d_kernel_iters; __constant__ inttype d_nbits_syncbits_offset; __constant__ PropertyStatus d_property; __constant__ inttype d_apply_por; __constant__ inttype d_check_cycle_proviso; // GPU shared memory array extern __shared__ volatile inttype shared[]; // thread ids #define WARP_ID (threadIdx.x / WARPSIZE) #define GLOBAL_WARP_ID (((blockDim.x / WARPSIZE)*blockIdx.x)+WARP_ID) #define NR_WARPS ((blockDim.x / WARPSIZE)*gridDim.x) #define LANE (threadIdx.x % WARPSIZE) #define HALFLANE (threadIdx.x % HALFWARPSIZE) //#define ENTRY_ID (LANE % d_sv_nints) #define ENTRY_ID (HALFLANE % d_sv_nints) #define GROUP_ID (LANE % d_nr_procs) #define GROUP_GID (WARP_ID * GROUPS_PER_WARP + LANE / d_nr_procs) #define NR_GROUPS ((blockDim.x / WARPSIZE) * GROUPS_PER_WARP) #define GROUPS_PER_WARP (WARPSIZE / d_nr_procs) // Group id to lane and lane to group id macros #define GTL(i) (LANE - GROUP_ID + (i)) #define LTG(i) ((i) - (LANE - GROUP_ID)) //#define NREL_IN_BUCKET ((WARPSIZE / d_sv_nints)) #define NREL_IN_BUCKET ((HALFWARPSIZE / d_sv_nints)*2) #define NREL_IN_BUCKET_HOST ((HALFWARPSIZE / sv_nints)*2) // constant for cuckoo hashing (Alcantara et al) static const inttype P = 979946131; // Retry constant to determine number of retries for element insertion #define RETRYFREQ 7 #define NR_HASH_FUNCTIONS 8 // Number of retries in local cache #define CACHERETRYFREQ 20 // Maximum size of state vectors (in nr. 
of 32-bit integers) #define MAX_SIZE 9 // Empty state vectors static const inttype EMPTYVECT32 = 0x7FFFFFFF; // Constant to indicate that no more work is required # define EXPLORATION_DONE 0x7FFFFFFF // offset in shared memory from which loaded data can be read static const int SH_OFFSET = 5; //static const int KERNEL_ITERS = 10; //static const int NR_OF_BLOCKS = 3120; //static const int BLOCK_SIZE = 512; static const int KERNEL_ITERS = 1; static const int NR_OF_BLOCKS = 1; static const int BLOCK_SIZE = 32; const size_t Mb = 1<<20; // test macros #define PRINTTHREADID() {printf("Hello thread %d\n", (blockIdx.x*blockDim.x)+threadIdx.x);} #define PRINTTHREAD(j, i) {printf("%d: Seen by thread %d: %d\n", (j), (blockIdx.x*blockDim.x)+threadIdx.x, (i));} // Offsets calculations for shared memory arrays #define HASHCONSTANTSLEN (2*NR_HASH_FUNCTIONS) #define VECTORPOSLEN (d_nr_procs+1) #define LTSSTATESIZELEN (d_nr_procs) #define OPENTILELEN (d_sv_nints*NR_GROUPS) #define LASTSEARCHLEN (blockDim.x/WARPSIZE) #define TGTSTATELEN (blockDim.x*d_sv_nints) #define THREADBUFFERLEN (NR_GROUPS*(THREADBUFFERSHARED+(d_nr_procs*d_max_buf_ints))) #define HASHCONSTANTSOFFSET (SH_OFFSET) #define VECTORPOSOFFSET (HASHCONSTANTSOFFSET+HASHCONSTANTSLEN) #define LTSSTATESIZEOFFSET (VECTORPOSOFFSET+VECTORPOSLEN) #define OPENTILEOFFSET (LTSSTATESIZEOFFSET+LTSSTATESIZELEN) #define LASTSEARCHOFFSET (OPENTILEOFFSET+OPENTILELEN) #define TGTSTATEOFFSET (LASTSEARCHOFFSET+LASTSEARCHLEN) #define THREADBUFFEROFFSET (TGTSTATEOFFSET+TGTSTATELEN) #define CACHEOFFSET (THREADBUFFEROFFSET+THREADBUFFERLEN) // One int for sync action counter // One int for POR counter #define THREADBUFFERSHARED 2 // parameter is thread id #define THREADBUFFERGROUPSTART(i) (THREADBUFFEROFFSET+ (((i) / WARPSIZE)*GROUPS_PER_WARP+(((i) % WARPSIZE) / d_nr_procs)) * (THREADBUFFERSHARED+(d_nr_procs*d_max_buf_ints))) // parameter is group id #define THREADBUFFERGROUPPOS(i, j) shared[tbgs+THREADBUFFERSHARED+((i)*d_max_buf_ints)+(j)] #define THREADGROUPCOUNTER shared[tbgs] #define THREADGROUPPOR shared[tbgs + 1] #define THREADINGROUP (LANE < (GROUPS_PER_WARP)*d_nr_procs) #define STATESIZE(i) (shared[LTSSTATESIZEOFFSET+(i)]) #define VECTORSTATEPOS(i) (shared[VECTORPOSOFFSET+(i)]) #define NR_OF_STATES_IN_TRANSENTRY(i) ((31 - d_bits_act) / shared[LTSSTATESIZEOFFSET+(i)]) // SM local progress flags #define ITERATIONS (shared[0]) #define CONTINUE (shared[1]) #define OPENTILECOUNT (shared[2]) #define WORKSCANRESULT (shared[3]) #define SCAN (shared[4]) // BIT MANIPULATION MACROS #define SETBIT(i, x) {(x) = ((1<<(i)) | (x));} #define GETBIT(i, x) (((x) >> (i)) & 1) #define SETBITS(i, j, x) {(x) = (x) | (((1<<(j))-1)^((1<<(i))-1));} #define GETBITS(x, y, start, len) {(x) = ((y) >> (start)) & ((1 << (len)) - 1);} #define GETPROCTRANSACT(a, t) GETBITS(a, t, 1, d_bits_act) #define GETPROCTRANSSYNC(a, t) {(a) = ((t) & 1);} #define GETPROCTRANSSTATE(a, t, i, j) GETBITS(a, t, 1+d_bits_act+(i)*STATESIZE(j), STATESIZE(j)) #define GETTRANSOFFSET(a, t, i) GETBITS(a, t, (i)*d_nbits_offset, d_nbits_offset) #define GETSYNCOFFSET(a, t, i) GETBITS(a, t, (i)*d_nbits_syncbits_offset, d_nbits_syncbits_offset) #define GETSTATEVECTORSTATE(b, t, i) { asm("{\n\t" \ " .reg .u64 t1;\n\t" \ " mov.b64 t1,{%1,%2};\n\t" \ " bfe.u64 t1, t1, %3, %4;\n\t" \ " cvt.u32.u64 %0,t1;\n\t" \ "}" : "=r"(b) : "r"((t)[VECTORSTATEPOS(i)/INTSIZE]), "r"(VECTORSTATEPOS(i)/INTSIZE == (VECTORSTATEPOS((i)+1)-1)/INTSIZE ? 
0 : (t)[VECTORSTATEPOS(i)/INTSIZE+1]), \ "r"(VECTORSTATEPOS(i)%INTSIZE), "r"(VECTORSTATEPOS(i+1)-VECTORSTATEPOS(i))); \ } #define SETSTATEVECTORSTATE(t, i, x) { asm("bfi.b32 %0, %1, %0, %2, %3;" \ : "+r"((t)[VECTORSTATEPOS(i)/INTSIZE]) : \ "r"(x), "r"(VECTORSTATEPOS(i)%INTSIZE), "r"(VECTORSTATEPOS((i)+1)-VECTORSTATEPOS(i))); \ if (VECTORSTATEPOS(i)/INTSIZE != (VECTORSTATEPOS((i)+1)-1)/INTSIZE) { \ asm("bfi.b32 %0, %1, %0, %2, %3;" \ : "+r"((t)[VECTORSTATEPOS(i+1)/INTSIZE]) : \ "r"((x)>>(INTSIZE - (VECTORSTATEPOS(i) % INTSIZE))), "r"(0), "r"(VECTORSTATEPOS((i)+1) % INTSIZE)); \ } \ } // NEEDS FIX: USE BIT 32 OF FIRST INTEGER TO INDICATE STATE OR NOT (1 or 0), IN CASE MULTIPLE INTEGERS ARE USED FOR STATE VECTOR!!! //#define ISSTATE(t) ((t)[(d_sv_nints-1)] != EMPTYVECT32) #define ISSTATE(t) ((t)[0] != EMPTYVECT32) #define SETNEWSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] | 0x80000000;} #define SETOLDSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] & 0x7FFFFFFF;} #define ISNEWSTATE(t) ((t)[(d_sv_nints-1)] >> 31) #define ISNEWSTATE_HOST(t) ((t)[(sv_nints-1)] >> 31) #define ISNEWINT(t) ((t) >> 31) #define OLDINT(t) ((t) & 0x7FFFFFFF) #define NEWINT(t) ((t) | 0x80000000) #define SETPORSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] | 0x40000000;} #define SETOTHERSTATE(t) { (t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] & 0xBFFFFFFF;} #define ISPORSTATE(t) (ISPORINT((t)[(d_sv_nints-1))) #define ISPORSTATE_HOST(t) (ISPORINT((t)[(sv_nints-1))) #define ISPORINT(t) (((t) & 0x40000000) >> 30) #define OTHERINT(t) ((t) & 0xBFFFFFFF) #define PORINT(t) ((t) | 0x40000000) #define STATE_FLAGS_MASK (d_apply_por ? 0x3FFFFFFF : 0x7FFFFFFF) #define STRIPSTATE(t) {(t)[(d_sv_nints-1)] = (t)[(d_sv_nints-1)] & STATE_FLAGS_MASK;} #define STRIPPEDSTATE(t, i) ((i == d_sv_nints-1) ? ((t)[i] & STATE_FLAGS_MASK) : (t)[i]) #define STRIPPEDENTRY(t, i) ((i == d_sv_nints-1) ? ((t) & STATE_FLAGS_MASK) : (t)) #define STRIPPEDENTRY_HOST(t, i) ((i == sv_nints-1) ? ((t) & (apply_por ? 0x3FFFFFFF : 0x7FFFFFFF)) : (t)) #define NEWSTATEPART(t, i) (((i) == d_sv_nints-1) ? 
((t)[d_sv_nints-1] | 0x80000000) : (t)[(i)]) #define COMPAREENTRIES(t1, t2) (((t1) & STATE_FLAGS_MASK) == ((t2) & STATE_FLAGS_MASK)) #define GETSYNCRULE(a, t, i) GETBITS(a, t, (i)*d_nr_procs, d_nr_procs) // HASH TABLE MACROS // Return 0 if not found, bit 2 is flag for new state, bit 3 is a flag for POR state, 8 if cache is full __device__ inttype STOREINCACHE(volatile inttype* t, inttype* cache, inttype* address) { inttype bi, bj, bk, bl, bitmask; indextype hashtmp; STRIPSTATE(t); hashtmp = 0; for (bi = 0; bi < d_sv_nints; bi++) { hashtmp += t[bi]; hashtmp <<= 5; } bitmask = d_sv_nints*((inttype) (hashtmp % ((d_shared_q_size - CACHEOFFSET) / d_sv_nints))); SETNEWSTATE(t); bl = 0; while (bl < CACHERETRYFREQ) { bi = atomicCAS((inttype *) &cache[bitmask+(d_sv_nints-1)], EMPTYVECT32, t[d_sv_nints-1]); if (bi == EMPTYVECT32) { for (bj = 0; bj < d_sv_nints-1; bj++) { cache[bitmask+bj] = t[bj]; } *address = bitmask; return 0; } if (COMPAREENTRIES(bi, t[d_sv_nints-1])) { if (d_sv_nints == 1) { *address = bitmask; return 1 + (ISNEWINT(bi) << 1) + (ISPORINT(bi) << 2); } else { for (bj = 0; bj < d_sv_nints-1; bj++) { if (cache[bitmask+bj] != (t)[bj]) { break; } } if (bj == d_sv_nints-1) { *address = bitmask; return 1 + (ISNEWINT(bi) << 1) + (ISPORINT(bi) << 2); } } } if (!ISNEWINT(bi)) { bj = atomicCAS((inttype *) &cache[bitmask+(d_sv_nints-1)], bi, t[d_sv_nints-1]); if (bi == bj) { for (bk = 0; bk < d_sv_nints-1; bk++) { cache[bitmask+bk] = t[bk]; } *address = bitmask; return 0; } } bl++; bitmask += d_sv_nints; if ((bitmask+(d_sv_nints-1)) >= (d_shared_q_size - CACHEOFFSET)) { bitmask = 0; } } return 8; } // Mark the state in the cache according to markNew // This function is used while applying POR to decide whether the cycle proviso // is satisfied. __device__ void MARKINCACHE(volatile inttype* t, inttype* cache, int markNew) { inttype bi, bj, bl, bitmask; indextype hashtmp; STRIPSTATE(t); hashtmp = 0; for (bi = 0; bi < d_sv_nints; bi++) { hashtmp += t[bi]; hashtmp <<= 5; } bitmask = d_sv_nints*((inttype) (hashtmp % ((d_shared_q_size - CACHEOFFSET) / d_sv_nints))); SETNEWSTATE(t); bl = 0; while (bl < CACHERETRYFREQ) { bi = cache[bitmask+(d_sv_nints-1)]; if (COMPAREENTRIES(bi, t[d_sv_nints-1])) { for (bj = 0; bj < d_sv_nints-1; bj++) { if (cache[bitmask+bj] != (t)[bj]) { break; } } if (bj == d_sv_nints-1) { if(markNew) { cache[bitmask+(d_sv_nints-1)] = NEWINT(OTHERINT(cache[bitmask+(d_sv_nints-1)] & STATE_FLAGS_MASK)); } else if(ISPORINT(bi) && ISNEWINT(bi)){ atomicCAS((inttype*) &cache[bitmask+(d_sv_nints-1)], bi, OLDINT(bi)); } return; } } bl++; bitmask += d_sv_nints; if ((bitmask+(d_sv_nints-1)) >= (d_shared_q_size - CACHEOFFSET)) { bitmask = 0; } } } // hash functions use bj variable #define FIRSTHASH(a, t) { hashtmp = 0; \ for (bj = 0; bj < d_sv_nints; bj++) { \ hashtmp += STRIPPEDSTATE(t,bj); \ hashtmp <<= 5; \ } \ hashtmp = (indextype) (d_h[0]*hashtmp+d_h[1]); \ (a) = WARPSIZE*((inttype)(hashtmp % P) % d_nrbuckets); \ } #define FIRSTHASHHOST(a) { indextype hashtmp = 0; \ hashtmp = (indextype) h[1]; \ (a) = WARPSIZE*((inttype) ((hashtmp % P) % q_size/WARPSIZE)); \ } #define HASHALL(a, i, t) { hashtmp = 0; \ for (bj = 0; bj < d_sv_nints; bj++) { \ hashtmp += STRIPPEDSTATE(t,bj); \ hashtmp <<= 5; \ } \ hashtmp = (indextype) (shared[HASHCONSTANTSOFFSET+(2*(i))]*(hashtmp)+shared[HASHCONSTANTSOFFSET+(2*(i))+1]); \ (a) = WARPSIZE*((inttype)(hashtmp % P) % d_nrbuckets); \ } #define HASHFUNCTION(a, i, t) ((HASHALL((a), (i), (t)))) #define COMPAREVECTORS(a, t1, t2) { (a) = 1; \ for (bk = 0; bk < 
d_sv_nints-1; bk++) { \ if ((t1)[bk] != (t2)[bk]) { \ (a) = 0; break; \ } \ } \ if ((a)) { \ if (STRIPPEDSTATE((t1),bk) != STRIPPEDSTATE((t2),bk)) { \ (a) = 0; \ } \ } \ } // check if bucket element associated with lane is a valid position to store data #define LANEPOINTSTOVALIDBUCKETPOS (HALFLANE < ((HALFWARPSIZE / d_sv_nints)*d_sv_nints)) __device__ inttype LANE_POINTS_TO_EL(inttype i) { if (i < HALFWARPSIZE / d_sv_nints) { return (LANE >= i*d_sv_nints && LANE < (i+1)*d_sv_nints); } else { return (LANE >= HALFWARPSIZE+(i-(HALFWARPSIZE / d_sv_nints))*d_sv_nints && LANE < HALFWARPSIZE+(i-(HALFWARPSIZE / d_sv_nints)+1)*d_sv_nints); } } // start position of element i in bucket #define STARTPOS_OF_EL_IN_BUCKET(i) ((i < (HALFWARPSIZE / d_sv_nints)) ? (i*d_sv_nints) : (HALFWARPSIZE + (i-(HALFWARPSIZE/d_sv_nints))*d_sv_nints)) #define STARTPOS_OF_EL_IN_BUCKET_HOST(i) ((i < (HALFWARPSIZE / sv_nints)) ? (i*sv_nints) : (HALFWARPSIZE + (i-(HALFWARPSIZE/sv_nints))*sv_nints)) // find or put element, warp version. t is element stored in block cache __device__ inttype FINDORPUT_WARP(inttype* t, inttype* d_q, volatile inttype* d_newstate_flags, inttype claim_work) { inttype bi, bj, bk, bl, bitmask; indextype hashtmp; inttype hash; BucketEntryStatus threadstatus; // prepare bitmask once to reason about results of threads in the same (state vector) group bitmask = 0; if (LANEPOINTSTOVALIDBUCKETPOS) { SETBITS(LANE-ENTRY_ID, LANE-ENTRY_ID+d_sv_nints, bitmask); } for (bi = 0; bi < NR_HASH_FUNCTIONS; bi++) { HASHFUNCTION(hash, bi, t); bl = d_q[hash+LANE]; bk = __ballot(STRIPPEDENTRY(bl, ENTRY_ID) == STRIPPEDSTATE(t, ENTRY_ID)); // threadstatus is used to determine whether full state vector has been found threadstatus = EMPTY; if (LANEPOINTSTOVALIDBUCKETPOS) { if ((bk & bitmask) == bitmask) { threadstatus = FOUND; } } if (__ballot(threadstatus == FOUND) != 0) { // state vector has been found in bucket. mark local copy as old. if (LANE == 0) { SETOLDSTATE(t); } return 1; } // try to find empty position to insert new state vector threadstatus = (bl == EMPTYVECT32 && LANEPOINTSTOVALIDBUCKETPOS) ? EMPTY : TAKEN; // let bk hold the smallest index of an available empty position bk = __ffs(__ballot(threadstatus == EMPTY)); while (bk != 0) { // write the state vector bk--; if (LANE >= bk && LANE < bk+d_sv_nints) { bl = atomicCAS(&(d_q[hash+LANE]), EMPTYVECT32, t[ENTRY_ID]); if (bl == EMPTYVECT32) { // success if (ENTRY_ID == d_sv_nints-1) { SETOLDSTATE(t); } // try to claim the state vector for future work bl = OPENTILELEN; if (ENTRY_ID == d_sv_nints-1) { // try to increment the OPENTILECOUNT counter if (claim_work && (bl = atomicAdd((inttype *) &OPENTILECOUNT, d_sv_nints)) < OPENTILELEN) { d_q[hash+LANE] = t[d_sv_nints-1]; } else { // There is work available for some block __threadfence(); d_newstate_flags[(hash / blockDim.x) % gridDim.x] = 1; } } // all active threads read the OPENTILECOUNT value of the last thread, and possibly store their part of the vector in the shared memory bl = __shfl(bl, LANE-ENTRY_ID+d_sv_nints-1); if (bl < OPENTILELEN) { // write part of vector to shared memory shared[OPENTILEOFFSET+bl+ENTRY_ID] = NEWSTATEPART(t, ENTRY_ID); } // write was successful. propagate this to the whole warp by setting threadstatus to FOUND threadstatus = FOUND; } else { // write was not successful. 
check if the state vector now in place equals the one we are trying to insert bk = __ballot(STRIPPEDENTRY(bl, ENTRY_ID) == STRIPPEDSTATE(t, ENTRY_ID)); if ((bk & bitmask) == bitmask) { // state vector has been found in bucket. mark local copy as old. if (LANE == bk) { SETOLDSTATE(t); } // propagate this result to the whole warp threadstatus = FOUND; } else { // state vector is different, and position in bucket is taken threadstatus = TAKEN; } } } // check if the state vector was either encountered or inserted if (__ballot(threadstatus == FOUND) != 0) { return 1; } // recompute bk bk = __ffs(__ballot(threadstatus == EMPTY)); } } return 0; } // find element, warp version. t is element stored in block cache // return 0 if not found or found and new, 1 if found and old __device__ inttype FIND_WARP(inttype* t, inttype* d_q) { inttype bi, bj, bk, bl, bitmask; indextype hashtmp; BucketEntryStatus threadstatus; // prepare bitmask once to reason about results of threads in the same (state vector) group bitmask = 0; if (LANEPOINTSTOVALIDBUCKETPOS) { SETBITS(LANE-ENTRY_ID, LANE-ENTRY_ID+d_sv_nints, bitmask); } for (bi = 0; bi < NR_HASH_FUNCTIONS; bi++) { HASHFUNCTION(hashtmp, bi, t); bl = d_q[hashtmp+LANE]; bk = __ballot(STRIPPEDENTRY(bl, ENTRY_ID) == STRIPPEDSTATE(t, ENTRY_ID)); // threadstatus is used to determine whether full state vector has been found threadstatus = EMPTY; if (LANEPOINTSTOVALIDBUCKETPOS) { if ((bk & bitmask) == bitmask) { threadstatus = FOUND; } } if (__ballot(threadstatus == FOUND) != 0) { // state vector has been found in bucket. mark local copy as old. if (threadstatus == FOUND & ISNEWINT(bl) == 0 & ENTRY_ID == d_sv_nints - 1) { SETOLDSTATE(t); } SETPORSTATE(t); return __ballot(threadstatus == FOUND & ISNEWINT(bl) == 0 & ENTRY_ID == d_sv_nints - 1); } // try to find empty position threadstatus = (bl == EMPTYVECT32 && LANEPOINTSTOVALIDBUCKETPOS) ? EMPTY : TAKEN; if(__any(threadstatus == EMPTY)) { // There is an empty slot in this bucket and the state vector was not found // State will also not be found after rehashing, so we return 0 SETPORSTATE(t); return 0; } } SETPORSTATE(t); return 0; } // macro to print state vector #define PRINTVECTOR(s) { printf ("("); \ for (bk = 0; bk < d_nr_procs; bk++) { \ GETSTATEVECTORSTATE(bj, (s), bk) \ printf ("%d", bj); \ if (bk < (d_nr_procs-1)) { \ printf (","); \ } \ } \ printf (")\n"); \ } int vmem = 0; // GPU textures texture<inttype, 1, cudaReadModeElementType> tex_proc_offsets_start; texture<inttype, 1, cudaReadModeElementType> tex_proc_offsets; texture<inttype, 1, cudaReadModeElementType> tex_proc_trans_start; texture<inttype, 1, cudaReadModeElementType> tex_proc_trans; texture<inttype, 1, cudaReadModeElementType> tex_syncbits_offsets; texture<inttype, 1, cudaReadModeElementType> tex_syncbits; /** * This macro checks return value of the CUDA runtime call and exits * the application if the call failed. 
*/ #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } //wrapper around cudaMalloc to count allocated memory and check for error while allocating int cudaMallocCount ( void ** ptr,int size) { cudaError_t err = cudaSuccess; vmem += size; err = cudaMalloc(ptr,size); if (err) { printf("Error %s at line %d in file %s\n", cudaGetErrorString(err), __LINE__, __FILE__); exit(1); } fprintf (stdout, "allocated %d\n", size); return size; } //test function to print a given state vector void print_statevector(FILE* stream, inttype *state, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints, inttype apply_por) { inttype i, s, bitmask; for (i = 0; i < nr_procs; i++) { bitmask = 0; if (firstbit_statevector[i]/INTSIZE == firstbit_statevector[i+1]/INTSIZE) { bitmask = (((1<<(firstbit_statevector[i+1] % INTSIZE))-1)^((1<<(firstbit_statevector[i] % INTSIZE))-1)); s = (state[firstbit_statevector[i]/INTSIZE] & bitmask) >> (firstbit_statevector[i] % INTSIZE); } else { bitmask = 1 << (firstbit_statevector[i+1] % INTSIZE); s = (state[firstbit_statevector[i]/INTSIZE] >> (firstbit_statevector[i] % INTSIZE) | (state[firstbit_statevector[i+1]/INTSIZE] & bitmask) << (INTSIZE - (firstbit_statevector[i] % INTSIZE))); \ } fprintf (stream, "%d", s); if (i < (nr_procs-1)) { fprintf (stream, ","); } } fprintf (stream, " "); for (i = 0; i < sv_nints; i++) { fprintf (stream, "%d ", STRIPPEDENTRY_HOST(state[i], i)); } fprintf (stream, "\n"); } //test function to print the contents of the device queue void print_queue(inttype *d_q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints, inttype apply_por) { inttype *q_test = (inttype*) malloc(sizeof(inttype)*q_size); cudaMemcpy(q_test, d_q, q_size*sizeof(inttype), cudaMemcpyDeviceToHost); inttype nw; int count = 0; int newcount = 0; for (inttype i = 0; i < (q_size/WARPSIZE); i++) { for (inttype j = 0; j < NREL_IN_BUCKET_HOST; j++) { if (q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)+(sv_nints-1)] != EMPTYVECT32) { count++; nw = ISNEWSTATE_HOST(&q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]); if (nw) { newcount++; fprintf (stdout, "new: "); } print_statevector(stdout, &(q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]), firstbit_statevector, nr_procs, sv_nints, apply_por); } } } fprintf (stdout, "nr. of states in hash table: %d (%d unexplored states)\n", count, newcount); } //test function to print the contents of the device queue void print_local_queue(FILE* stream, inttype *q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints, inttype apply_por) { int count = 0, newcount = 0; inttype nw; for (inttype i = 0; i < (q_size/WARPSIZE); i++) { for (inttype j = 0; j < NREL_IN_BUCKET_HOST; j++) { if (q[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)+(sv_nints-1)] != EMPTYVECT32) { count++; nw = ISNEWSTATE_HOST(&q[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]); if (nw) { newcount++; fprintf (stream, "new: "); } print_statevector(stream, &(q[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)]), firstbit_statevector, nr_procs, sv_nints, apply_por); } } } fprintf (stream, "nr. 
of states in hash table: %d (%d unexplored states)\n", count, newcount); } //test function to count the contents of the device queue void count_queue(inttype *d_q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints) { inttype *q_test = (inttype*) malloc(sizeof(inttype)*q_size); cudaMemcpy(q_test, d_q, q_size*sizeof(inttype), cudaMemcpyDeviceToHost); int count = 0; for (inttype i = 0; i < (q_size/WARPSIZE); i++) { for (inttype j = 0; j < NREL_IN_BUCKET_HOST; j++) { if (q_test[(i*WARPSIZE)+STARTPOS_OF_EL_IN_BUCKET_HOST(j)+(sv_nints-1)] != EMPTYVECT32) { count++; } } } fprintf (stdout, "nr. of states in hash table: %d\n", count); } //test function to count the contents of the host queue void count_local_queue(inttype *q, inttype q_size, inttype *firstbit_statevector, inttype nr_procs, inttype sv_nints) { int count = 0, newcount = 0; inttype nw; inttype nrbuckets = q_size / WARPSIZE; inttype nrels = NREL_IN_BUCKET_HOST; for (inttype i = 0; i < nrbuckets; i++) { for (inttype j = 0; j < nrels; j++) { inttype elpos = STARTPOS_OF_EL_IN_BUCKET_HOST(j); inttype abselpos = (i*WARPSIZE)+elpos+sv_nints-1; inttype q_abselpos = q[abselpos]; if (q_abselpos != EMPTYVECT32) { count++; nw = ISNEWSTATE_HOST(&q[(i*WARPSIZE)+elpos]); if (nw) { newcount++; } } } } fprintf (stdout, "nr. of states in hash table: %d (%d unexplored states)\n", count, newcount); } /** * CUDA kernel function to initialise the queue */ __global__ void init_queue(inttype *d_q, inttype n_elem) { inttype nthreads = blockDim.x*gridDim.x; inttype i = (blockIdx.x *blockDim.x) + threadIdx.x; for(; i < n_elem; i += nthreads) { d_q[i] = (inttype) EMPTYVECT32; } } /** * CUDA kernel to store initial state in hash table */ __global__ void store_initial(inttype *d_q, inttype *d_h, inttype *d_newstate_flags, inttype blockdim, inttype griddim) { inttype bj, hash; indextype hashtmp; inttype state[MAX_SIZE]; for (bj = 0; bj < d_sv_nints; bj++) { state[bj] = 0; } SETNEWSTATE(state); FIRSTHASH(hash, state); for (bj = 0; bj < d_sv_nints; bj++) { d_q[hash+bj] = state[bj]; } d_newstate_flags[(hash / blockdim) % griddim] = 1; } /** * Kernel that counts the amount of states in global memory */ __global__ void count_states(inttype *d_q, inttype *result) { if(threadIdx.x == 0) { shared[0] = 0; } __syncthreads(); int localResult = 0; for(int i = GLOBAL_WARP_ID; i < d_nrbuckets; i += NR_WARPS) { int tmp = d_q[i*WARPSIZE+LANE]; if (ENTRY_ID == (d_sv_nints-1) && tmp != EMPTYVECT32) { localResult++; } } atomicAdd((unsigned int*)shared, localResult); __syncthreads(); if(threadIdx.x == 0) { atomicAdd(result, shared[0]); } } // When the cache overflows, use the whole warp to store states to global memory __device__ void store_cache_overflow_warp(inttype *d_q, volatile inttype *d_newstate_flags, int has_overflow) { while(int c = __ballot(has_overflow)) { int active_lane = __ffs(c) - 1; int bj = FINDORPUT_WARP((inttype*) &shared[TGTSTATEOFFSET + (threadIdx.x-LANE+active_lane)*d_sv_nints], d_q, d_newstate_flags, 0); if(LANE == active_lane) { has_overflow = 0; if(bj == 0) { CONTINUE = 2; } } } } // Copy all states from the cache to global memory __device__ void copy_cache_to_global(inttype *d_q, inttype* cache, volatile inttype *d_newstate_flags) { int k = (d_shared_q_size-CACHEOFFSET)/d_sv_nints; for (int i = WARP_ID; i * WARPSIZE < k; i += (blockDim.x / WARPSIZE)) { int have_new_state = i * WARPSIZE + LANE < k && ISNEWSTATE(&cache[(i*WARPSIZE+LANE)*d_sv_nints]); while (int c = __ballot(have_new_state)) { int active_lane = __ffs(c) - 1; 
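// The warp serializes over the lanes that still hold a new cached state: the ballot above
// yields a mask of those lanes, __ffs selects the lowest one, and all 32 lanes then cooperate
// in FINDORPUT_WARP to move that single state vector into the global hash table (claiming it
// as future work where possible). Only the selected lane clears its have_new_state flag, so
// the loop makes progress until the mask is empty; a failed insertion signals a full table by
// setting CONTINUE to 2.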
if(FINDORPUT_WARP((inttype*) &cache[(i*WARPSIZE+active_lane)*d_sv_nints], d_q, d_newstate_flags, 1) == 0) { CONTINUE = 2; } if (LANE == active_lane) { have_new_state = 0; } } } } /** * CUDA kernel function for BFS iteration state gathering * Order of data in the shared queue: * (0. index of process LTS states sizes) * (1. index of sync rules offsets) * (2. index of sync rules) * (1. index of open queue tile) * 0. the 'iterations' flag to count the number of iterations so far (nr of tiles processed by SM) * 1. the 'continue' flag for thread work * (4. index of threads buffer) * (5. index of hash table) * 2. constants for d_q hash functions (2 per function, in total 8 by default) * 3. state vector offsets (nr_procs+1 elements) * 4. sizes of states in process LTS states (nr_procs elements) * (9. sync rules + offsets (nr_syncbits_offsets + nr_syncbits elements)) * 5. tile of open queue to be processed by block (sv_nints*(blockDim.x / nr_procs) elements) * 6. buffer for threads ((blockDim.x*max_buf_ints)+(blockDim.x/nr_procs) elements) * 7. hash table */ __global__ void __launch_bounds__(512, 2) gather(inttype *d_q, const inttype *d_h, const inttype *d_bits_state, const inttype *d_firstbit_statevector, inttype *d_contBFS, inttype *d_property_violation, volatile inttype *d_newstate_flags, inttype *d_worktiles, const inttype scan) { inttype i, k, l, index, offset1, offset2, tmp, cont, act, sync_offset1, sync_offset2; volatile inttype* src_state = &shared[OPENTILEOFFSET+d_sv_nints*GROUP_GID]; volatile inttype* tgt_state = &shared[TGTSTATEOFFSET+threadIdx.x*d_sv_nints]; inttype* cache = (inttype*) &shared[CACHEOFFSET]; inttype bitmask, bi; int pos; int tbgs = THREADBUFFERGROUPSTART(threadIdx.x); // TODO // is at least one outgoing transition enabled for a given state (needed to detect deadlocks) inttype outtrans_enabled; // Reset the shared variables if (threadIdx.x < SH_OFFSET) { shared[threadIdx.x] = 0; } // Load the hash constants into shared memory for (int j = threadIdx.x; j < HASHCONSTANTSLEN; j += blockDim.x) { shared[j+HASHCONSTANTSOFFSET] = d_h[j]; } // Load the state sizes and offsets into shared memory for (int j = threadIdx.x; j < VECTORPOSLEN; j += blockDim.x) { VECTORSTATEPOS(j) = d_firstbit_statevector[j]; } for (int j = threadIdx.x; j < LTSSTATESIZELEN; j += blockDim.x) { STATESIZE(j) = d_bits_state[j]; } // Clean the cache for (int j = threadIdx.x; j < (d_shared_q_size - (cache-shared)); j += blockDim.x) { cache[j] = EMPTYVECT32; } if(scan) { // Copy the work tile from global mem if (threadIdx.x < OPENTILELEN + LASTSEARCHLEN) { shared[OPENTILEOFFSET+threadIdx.x] = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x]; } if(threadIdx.x == 0) { OPENTILECOUNT = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN + LASTSEARCHLEN]; } } else if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { // On first run: initialize the work tile to empty shared[OPENTILEOFFSET+threadIdx.x] = threadIdx.x < OPENTILELEN ? 
EMPTYVECT32 : 0; } __syncthreads(); while (ITERATIONS < d_kernel_iters) { if (threadIdx.x == 0 && OPENTILECOUNT < OPENTILELEN && d_newstate_flags[blockIdx.x]) { // Indicate that we are scanning d_newstate_flags[blockIdx.x] = 2; SCAN = 1; } __syncthreads(); // Scan the open set for work; we use the OPENTILECOUNT flag at this stage to count retrieved elements if (SCAN) { inttype last_search_location = shared[LASTSEARCHOFFSET + WARP_ID]; // This block should be able to find a new state int found_new_state = 0; for (i = GLOBAL_WARP_ID; i < d_nrbuckets && OPENTILECOUNT < OPENTILELEN; i += NR_WARPS) { int loc = i + last_search_location; if(loc >= d_nrbuckets) { last_search_location = -i + GLOBAL_WARP_ID; loc = i + last_search_location; } tmp = d_q[loc*WARPSIZE+LANE]; l = EMPTYVECT32; if (ENTRY_ID == (d_sv_nints-1)) { if (ISNEWINT(tmp)) { found_new_state = 1; // try to increment the OPENTILECOUNT counter, if successful, store the state l = atomicAdd((uint32_t *) &OPENTILECOUNT, d_sv_nints); if (l < OPENTILELEN) { d_q[loc*WARPSIZE+LANE] = OLDINT(tmp); } } } // all threads read the OPENTILECOUNT value of the 'tail' thread, and possibly store their part of the vector in the shared memory if (LANEPOINTSTOVALIDBUCKETPOS) { l = __shfl(l, LANE-ENTRY_ID+d_sv_nints-1); if (l < OPENTILELEN) { // write part of vector to shared memory shared[OPENTILEOFFSET+l+ENTRY_ID] = tmp; } } } if(i < d_nrbuckets) { last_search_location = i - GLOBAL_WARP_ID; } else { last_search_location = 0; } if(LANE == 0) { // Store the last search location, so we can continue from that point later on shared[LASTSEARCHOFFSET + WARP_ID] = last_search_location; } if(found_new_state || i < d_nrbuckets) { WORKSCANRESULT = 1; } } __syncthreads(); // if work has been retrieved, indicate this if (threadIdx.x == 0) { if (OPENTILECOUNT > 0) { (*d_contBFS) = 1; } if(SCAN && WORKSCANRESULT == 0 && d_newstate_flags[blockIdx.x] == 2) { // Scanning has completed and no new states were found by this block, // save this information to prevent unnecessary scanning later on d_newstate_flags[blockIdx.x] = 0; } else { WORKSCANRESULT = 0; } } // is the thread part of an 'active' group? offset1 = 0; offset2 = 0; // Reset the whole thread buffer (shared + private) int start = THREADBUFFEROFFSET; int end = THREADBUFFEROFFSET + THREADBUFFERLEN; for(int j = start + threadIdx.x; j < end; j+=blockDim.x) { shared[j] = 0; } if (THREADINGROUP) { // Is there work? 
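// Each thread in a group handles one process LTS, identified by GROUP_ID. The per-state
// transition offsets are packed d_nbits_offset bits at a time into 32-bit words, so the code
// below fetches the appropriate word(s) from the tex_proc_offsets texture and decodes the
// half-open range [offset1, offset2) of this state's entries in tex_proc_trans.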
if (ISSTATE(src_state)) { // Gather the required transition information for all states in the tile i = tex1Dfetch(tex_proc_offsets_start, GROUP_ID); // Determine process state GETSTATEVECTORSTATE(cont, src_state, GROUP_ID); // Offset position index = cont/(INTSIZE/d_nbits_offset); pos = cont - (index*(INTSIZE/d_nbits_offset)); tmp = tex1Dfetch(tex_proc_offsets, i+index); GETTRANSOFFSET(offset1, tmp, pos); if (pos == (INTSIZE/d_nbits_offset)-1) { tmp = tex1Dfetch(tex_proc_offsets, i+index+1); GETTRANSOFFSET(offset2, tmp, 0); } else { GETTRANSOFFSET(offset2, tmp, pos+1); } } } // variable cont is used to indicate whether the buffer content of this thread still needs processing cont = 0; outtrans_enabled = 0; // First, generate successors following from local actions while (1) { i = 1; if(offset1 < offset2) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSSYNC(i, tmp); } if (__any(i == 0)) { if(i == 0) { // no deadlock outtrans_enabled = 1; // construct state for (int j = 0; j < d_sv_nints; j++) { tgt_state[j] = src_state[j]; } offset1++; } // loop over this transentry for (int j = 0; __any(i == 0 && j < NR_OF_STATES_IN_TRANSENTRY(GROUP_ID)); j++) { if(i == 0) { GETPROCTRANSSTATE(pos, tmp, j, GROUP_ID); if (pos > 0) { SETSTATEVECTORSTATE(tgt_state, GROUP_ID, pos-1); // check for violation of safety property, if required if (d_property == SAFETY) { if (GROUP_ID == d_nr_procs-1) { // pos contains state id + 1 // error state is state 1 if (pos == 2) { // error state found (*d_property_violation) = 1; } } } // store tgt_state in cache // if k == 8, cache is full, immediately store in global hash table k = STOREINCACHE(tgt_state, cache, &bi); } else { i = 1; } } store_cache_overflow_warp(d_q, d_newstate_flags, i == 0 && k == 8); } } else { break; } } // Now there are only synchronizing actions left act = 1 << d_bits_act; // While the hash table is not full and there are transitions left, // explore those transitions while (CONTINUE != 2 && __any(offset1 < offset2 || cont)) { if (offset1 < offset2 && !cont) { // Fill the buffer with transitions with the same action label tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSACT(act, tmp); // store transition entry THREADBUFFERGROUPPOS(GROUP_ID,0) = tmp; cont = 1; offset1++; bitmask = act; for (int j = 1; j < d_max_buf_ints; j++) { tmp = 0; if(offset1 < offset2 && act == bitmask) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSACT(bitmask, tmp); if (act == bitmask) { offset1++; } else { tmp = 0; } } THREADBUFFERGROUPPOS(GROUP_ID,j) = tmp; j++; } } int sync_act = act; if (__popc((__ballot(cont) >> (LANE - GROUP_ID)) & ((1 << d_nr_procs) - 1)) > 1) { // Find the smallest 'sync_act' with butterfly reduction for(int j = 1; j < d_nr_procs; j<<=1) { sync_act = min(__shfl(sync_act, GTL((GROUP_ID + j) % d_nr_procs)), sync_act); } } else { // Only one process with synchronizing transitions left, there will // be no more successors from this state cont = 0; offset1 = offset2; sync_act = 1 << d_bits_act; } // Now, we have obtained the info needed to combine process transitions sync_offset1 = sync_offset2 = 0; // Find out which processes have the smallest 'act' int proc_enabled = (__ballot(act == sync_act) >> (LANE - GROUP_ID)) & ((1 << d_nr_procs) - 1); // Only generate synchronizing successors if there are more that two processes with 'sync_act' enabled if(sync_act < (1 << d_bits_act) && (__popc(proc_enabled) >= 2)) { // syncbits Offset position i = sync_act/(INTSIZE/d_nbits_syncbits_offset); pos = sync_act - 
(i*(INTSIZE/d_nbits_syncbits_offset)); l = tex1Dfetch(tex_syncbits_offsets, i); GETSYNCOFFSET(sync_offset1, l, pos); pos++; if (pos == (INTSIZE/d_nbits_syncbits_offset)) { l = tex1Dfetch(tex_syncbits_offsets, i+1); pos = 0; } GETSYNCOFFSET(sync_offset2, l, pos); } // iterate through the relevant syncbit filters for (int j = GROUP_ID;__any(sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2);) { tmp = 0; // Keep searching the array with sync rules until we have found an applicable rule or we have reached the end // We don't need to check for THREADINGROUP, since sync_offset1 == sync_offset2 for threads outside a group while(!(tmp != 0 && (tmp & proc_enabled) == tmp) && sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2) { // Fetch the rule index = tex1Dfetch(tex_syncbits, sync_offset1 + j / (INTSIZE/d_nr_procs)); GETSYNCRULE(tmp, index, j % (INTSIZE/d_nr_procs)); // Increase the counter such that threads that have not found an applicable sync rule take a smaller step j += d_nr_procs - __popc((__ballot(tmp != 0 && (tmp & proc_enabled) == tmp) >> (LANE - GROUP_ID)) & ((1 << GROUP_ID) - 1)); } // Find the smallest index j for the next iteration // We don't need to check for THREADINGROUP because there is no thread // outside of a group with GROUP_ID == d_nr_procs - 1 if(j >= d_nr_procs - 1 && THREADGROUPCOUNTER < j) { atomicMax((inttype*) &THREADGROUPCOUNTER, j); } int work_remaining = 0; int has_second_succ = 0; // start combining entries in the buffer to create target states if (tmp != 0 && (tmp & proc_enabled) == tmp) { // source state is not a deadlock outtrans_enabled = 1; // copy src_state into tgt_state for (pos = 0; pos < d_sv_nints; pos++) { tgt_state[pos] = src_state[pos]; } // construct first successor for (int rule = tmp; rule;) { pos = __ffs(rule) - 1; // get first state GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, k-1); // Check if this buffer has a second state GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 1, pos); if(d_max_buf_ints > 1 && !k) { GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,1), 0, pos); } if(k) { has_second_succ |= 1 << pos; } rule &= ~(1 << pos); } work_remaining = 1 + has_second_succ; } // while we keep getting new states, store them while (__any(work_remaining)) { l = 0; if(work_remaining) { // check for violation of safety property, if required if (d_property == SAFETY) { GETSTATEVECTORSTATE(pos, tgt_state, d_nr_procs-1); if (pos == 1) { // error state found (*d_property_violation) = 1; } } // store tgt_state in cache; if i == d_shared_q_size, state was found, duplicate detected // if i == d_shared_q_size+1, cache is full, immediately store in global hash table l = STOREINCACHE(tgt_state, cache, &bitmask); if(work_remaining == 1) { // There will be no second successor work_remaining = 0; } } store_cache_overflow_warp(d_q, d_newstate_flags, l == 8); if(work_remaining) { // get next successor by finding the next combination from the buffer // Only look at processes that stored more than one successor in the buffer (has_second_succ) int rule; for (rule = has_second_succ; rule;) { pos = __ffs(rule) - 1; int curr_st; GETSTATEVECTORSTATE(curr_st, tgt_state, pos); int st = 0; int num_states_in_trans = NR_OF_STATES_IN_TRANSENTRY(pos); // We search for the position of the current state in the buffer // We don't have to compare the last position: if curr_st has not been found yet, // then it has to be in the last position for (k = 0; k < d_max_buf_ints * num_states_in_trans - 1; k++) { GETPROCTRANSSTATE(st, 
THREADBUFFERGROUPPOS(pos,k / num_states_in_trans), k % num_states_in_trans, pos); if (curr_st == (st-1) || st == 0) { break; } } // Try to get the next element k++; if (k < d_max_buf_ints * num_states_in_trans && st != 0) { // Retrieve next element, insert it in 'tgt_state' if it is not 0, and return result, otherwise continue GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,k / num_states_in_trans), k % num_states_in_trans, pos); if (st > 0) { SETSTATEVECTORSTATE(tgt_state, pos, st-1); break; } } // else, set this process state to first one, and continue to next process GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, st-1); rule &= ~(1 << pos); } // did we find a successor? if not, all successors have been generated if (rule == 0) { work_remaining = 0; } } } j = THREADINGROUP ? THREADGROUPCOUNTER + GROUP_ID + 1 : 0; } // only active threads should reset 'cont' if (cont && sync_act == act) { cont = 0; act = 1 << d_bits_act; THREADGROUPCOUNTER = 0; } } // have we encountered a deadlock state? // we use the shared memory to communicate this to the group leaders if (d_property == DEADLOCK) { if (THREADINGROUP) { if (ISSTATE(src_state)) { THREADBUFFERGROUPPOS(GROUP_ID, 0) = outtrans_enabled; // group leader collects results l = 0; if (GROUP_ID == 0) { for (i = 0; i < d_nr_procs; i++) { l += THREADBUFFERGROUPPOS(i, 0); } if (l == 0) { // deadlock state found (*d_property_violation) = 1; } } } } } int performed_work = OPENTILECOUNT != 0; __syncthreads(); // Reset the work tile count if (threadIdx.x == 0) { OPENTILECOUNT = 0; } __syncthreads(); // start scanning the local cache and write results to the global hash table if(performed_work) { copy_cache_to_global(d_q, cache, d_newstate_flags); } __syncthreads(); // Write empty state vector to part of the work tile that is not used if (threadIdx.x < OPENTILELEN - OPENTILECOUNT) { shared[OPENTILEOFFSET+OPENTILECOUNT+threadIdx.x] = EMPTYVECT32; } // Ready to start next iteration, if error has not occurred if (threadIdx.x == 0) { if (CONTINUE == 2) { (*d_contBFS) = 2; ITERATIONS = d_kernel_iters; } else { ITERATIONS++; } CONTINUE = 0; } __syncthreads(); } //Copy the work tile to global mem if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x] = shared[OPENTILEOFFSET+threadIdx.x]; } if(threadIdx.x == 0) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN+LASTSEARCHLEN] = OPENTILECOUNT; } } __global__ void __launch_bounds__(512, 2) gather_por(inttype *d_q, inttype *d_h, inttype *d_bits_state, inttype *d_firstbit_statevector, inttype *d_proc_offsets_start, inttype *d_proc_offsets, inttype *d_proc_trans, inttype *d_syncbits_offsets, inttype *d_syncbits, inttype *d_contBFS, inttype *d_property_violation, volatile inttype *d_newstate_flags, inttype *d_worktiles, inttype scan) { inttype i, k, l, index, offset1, offset2, tmp, cont, act, sync_offset1, sync_offset2; volatile inttype* src_state = &shared[OPENTILEOFFSET+d_sv_nints*GROUP_GID]; volatile inttype* tgt_state = &shared[TGTSTATEOFFSET+threadIdx.x*d_sv_nints]; inttype* cache = (inttype*) &shared[CACHEOFFSET]; inttype bitmask, bi, bj; int pos; int tbgs = THREADBUFFERGROUPSTART(threadIdx.x); // TODO: remove this inttype TMPVAR; // is at least one outgoing transition enabled for a given state (needed to detect deadlocks) inttype outtrans_enabled; // Locally store the state sizes and syncbits if (threadIdx.x < SH_OFFSET) { shared[threadIdx.x] = 0; } for (i = threadIdx.x; i < 
HASHCONSTANTSLEN; i += blockDim.x) { shared[i+HASHCONSTANTSOFFSET] = d_h[i]; } for (i = threadIdx.x; i < VECTORPOSLEN; i += blockDim.x) { VECTORSTATEPOS(i) = d_firstbit_statevector[i]; } for (i = threadIdx.x; i < LTSSTATESIZELEN; i += blockDim.x) { STATESIZE(i) = d_bits_state[i]; } // Clean the cache for (i = threadIdx.x; i < (d_shared_q_size - CACHEOFFSET); i += blockDim.x) { cache[i] = EMPTYVECT32; } if(scan) { // Copy the work tile from global mem if (threadIdx.x < OPENTILELEN + LASTSEARCHLEN) { shared[OPENTILEOFFSET+threadIdx.x] = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x]; } if(threadIdx.x == 0) { OPENTILECOUNT = d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN + LASTSEARCHLEN]; } } else if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { // On first run: initialize the work tile to empty shared[OPENTILEOFFSET+threadIdx.x] = threadIdx.x < OPENTILELEN ? EMPTYVECT32 : 0; } __syncthreads(); while (ITERATIONS < d_kernel_iters) { if (threadIdx.x == 0 && OPENTILECOUNT < OPENTILELEN && d_newstate_flags[blockIdx.x]) { // Indicate that we are scanning d_newstate_flags[blockIdx.x] = 2; SCAN = 1; } __syncthreads(); // Scan the open set for work; we use the OPENTILECOUNT flag at this stage to count retrieved elements if (SCAN) { inttype last_search_location = shared[LASTSEARCHOFFSET + WARP_ID]; // This block should be able to find a new state int found_new_state = 0; for (i = GLOBAL_WARP_ID; i < d_nrbuckets && OPENTILECOUNT < OPENTILELEN; i += NR_WARPS) { int loc = i + last_search_location; if(loc >= d_nrbuckets) { last_search_location = -i + GLOBAL_WARP_ID; loc = i + last_search_location; } tmp = d_q[loc*WARPSIZE+LANE]; l = EMPTYVECT32; if (ENTRY_ID == (d_sv_nints-1)) { if (ISNEWINT(tmp)) { found_new_state = 1; // try to increment the OPENTILECOUNT counter, if successful, store the state l = atomicAdd((uint32_t *) &OPENTILECOUNT, d_sv_nints); if (l < OPENTILELEN) { d_q[loc*WARPSIZE+LANE] = OLDINT(tmp); } } } // all threads read the OPENTILECOUNT value of the 'tail' thread, and possibly store their part of the vector in the shared memory if (LANEPOINTSTOVALIDBUCKETPOS) { l = __shfl(l, LANE-ENTRY_ID+d_sv_nints-1); if (l < OPENTILELEN) { // write part of vector to shared memory shared[OPENTILEOFFSET+l+ENTRY_ID] = tmp; } } } if(i < d_nrbuckets) { last_search_location = i - GLOBAL_WARP_ID; } else { last_search_location = 0; } if(LANE == 0) { shared[LASTSEARCHOFFSET + WARP_ID] = last_search_location; } if(found_new_state || i < d_nrbuckets) { WORKSCANRESULT = 1; } } __syncthreads(); // if work has been retrieved, indicate this if (threadIdx.x == 0) { if (OPENTILECOUNT > 0) { (*d_contBFS) = 1; } if(SCAN && WORKSCANRESULT == 0 && d_newstate_flags[blockIdx.x] == 2) { // Scanning has completed and no new states were found by this block, // save this information to prevent unnecessary scanning later on d_newstate_flags[blockIdx.x] = 0; } else { WORKSCANRESULT = 0; } scan = 0; } // is the thread part of an 'active' group? offset1 = 0; offset2 = 0; // Reset the whole thread buffer (shared + private) int start = THREADBUFFEROFFSET; int end = THREADBUFFEROFFSET + THREADBUFFERLEN; for(i = start + threadIdx.x; i < end; i+=blockDim.x) { shared[i] = 0; } if (THREADINGROUP) { act = 1 << d_bits_act; // Is there work? 
if (ISSTATE(src_state)) { // Gather the required transition information for all states in the tile i = tex1Dfetch(tex_proc_offsets_start, GROUP_ID); // Determine process state GETSTATEVECTORSTATE(cont, src_state, GROUP_ID); // Offset position index = cont/(INTSIZE/d_nbits_offset); pos = cont - (index*(INTSIZE/d_nbits_offset)); tmp = tex1Dfetch(tex_proc_offsets, i+index); GETTRANSOFFSET(offset1, tmp, pos); if (pos == (INTSIZE/d_nbits_offset)-1) { tmp = tex1Dfetch(tex_proc_offsets, i+index+1); GETTRANSOFFSET(offset2, tmp, 0); } else { GETTRANSOFFSET(offset2, tmp, pos+1); } } if (GROUP_ID == 0) { THREADGROUPPOR = 0; } } // iterate over the outgoing transitions of state 'cont' // variable cont is reused to indicate whether the buffer content of this thread still needs processing cont = 0; // while there is work to be done outtrans_enabled = 0; char generate = 1; char proviso_satisfied = 0; int cluster_trans = 1 << GROUP_ID; int orig_offset1 = offset1; while(generate > -1) { while (CONTINUE != 2 && __any(offset1 < offset2 || cont)) { if (offset1 < offset2 && !cont) { // reset act act = (1 << (d_bits_act)); // reset buffer of this thread for (l = 0; l < d_max_buf_ints; l++) { THREADBUFFERGROUPPOS(GROUP_ID, l) = 0; } } // if not sync, store in hash table // loop over all transentries while (1) { i = 1; if(offset1 < offset2 && !cont) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSSYNC(i, tmp); } if (__any(i == 0)) { if(i == 0) { // no deadlock outtrans_enabled = 1; // construct state for (l = 0; l < d_sv_nints; l++) { tgt_state[l] = src_state[l]; } offset1++; } // loop over this transentry for (l = 0; __any(i == 0 && l < NR_OF_STATES_IN_TRANSENTRY(GROUP_ID)); l++) { if(i == 0) { GETPROCTRANSSTATE(pos, tmp, l, GROUP_ID); if (pos > 0) { SETSTATEVECTORSTATE(tgt_state, GROUP_ID, pos-1); // check for violation of safety property, if required if (d_property == SAFETY) { if (GROUP_ID == d_nr_procs-1) { // pos contains state id + 1 // error state is state 1 if (pos == 2) { // error state found (*d_property_violation) = 1; } } } if (!d_check_cycle_proviso) { // Set proviso to 1 to indicate at least one state has been found proviso_satisfied = 1; } // store tgt_state in cache // if k == 8, cache is full, immediately store in global hash table if(generate == 1) { k = STOREINCACHE(tgt_state, cache, &bi); if(k >> 2) { proviso_satisfied |= (k >> 1) & 1; } else if (!d_check_cycle_proviso) { SETPORSTATE(&cache[bi]); } } else { MARKINCACHE(tgt_state, cache, (THREADGROUPPOR >> GROUP_ID) & 1); } } else { i = 1; } } store_cache_overflow_warp(d_q, d_newstate_flags, i == 0 && k == 8); int c; // Check cycle proviso with the whole warp while(generate && d_check_cycle_proviso && (c = __ballot(i == 0 && (k >> 2 == 0)))) { int active_lane = __ffs(c) - 1; int cache_index = __shfl(bi, active_lane); bj = FIND_WARP((inttype*) &cache[cache_index], d_q); if(LANE == active_lane) { i = 1; if(bj == 0) { proviso_satisfied = 1; } } } } } else { break; } } // i is the current relative position in the buffer for this thread i = 0; if (offset1 < offset2 && !cont) { GETPROCTRANSACT(act, tmp); // store transition entry THREADBUFFERGROUPPOS(GROUP_ID,i) = tmp; cont = 1; i++; offset1++; while (offset1 < offset2) { tmp = tex1Dfetch(tex_proc_trans, offset1); GETPROCTRANSACT(bitmask, tmp); if (act == bitmask) { THREADBUFFERGROUPPOS(GROUP_ID,i) = tmp; i++; offset1++; } else { break; } } } int sync_act = cont ? 
act : (1 << d_bits_act); for(i = 1; i < d_nr_procs; i<<=1) { sync_act = min(__shfl(sync_act, GTL((GROUP_ID + i) % d_nr_procs)), sync_act); } // Now, we have obtained the info needed to combine process transitions sync_offset1 = sync_offset2 = 0; int proc_enabled = (__ballot(act == sync_act) >> (LANE - GROUP_ID)) & ((1 << d_nr_procs) - 1); if(THREADINGROUP && sync_act < (1 << d_bits_act)) { // syncbits Offset position i = sync_act/(INTSIZE/d_nbits_syncbits_offset); pos = sync_act - (i*(INTSIZE/d_nbits_syncbits_offset)); l = tex1Dfetch(tex_syncbits_offsets, i); GETSYNCOFFSET(sync_offset1, l, pos); if (pos == (INTSIZE/d_nbits_syncbits_offset)-1) { l = tex1Dfetch(tex_syncbits_offsets, i+1); GETSYNCOFFSET(sync_offset2, l, 0); } else { GETSYNCOFFSET(sync_offset2, l, pos+1); } } // iterate through the relevant syncbit filters tmp = 1; for (int j = GROUP_ID;__any(sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2 && tmp); j+=d_nr_procs) { index = 0; if(THREADINGROUP && sync_act < (1 << d_bits_act) && sync_offset1 + j / (INTSIZE/d_nr_procs) < sync_offset2 && tmp) { index = tex1Dfetch(tex_syncbits, sync_offset1 + j / (INTSIZE/d_nr_procs)); } SETOLDSTATE(tgt_state); int has_second_succ = 0; GETSYNCRULE(tmp, index, j % (INTSIZE/d_nr_procs)); if (tmp != 0 && (tmp & proc_enabled) == tmp) { // source state is not a deadlock outtrans_enabled = 1; // start combining entries in the buffer to create target states // if sync rule applicable, construct the first successor // copy src_state into tgt_state for (pos = 0; pos < d_sv_nints; pos++) { tgt_state[pos] = src_state[pos]; } // construct first successor for (int rule = tmp; rule;) { pos = __ffs(rule) - 1; // get first state GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, k-1); GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,0), 1, pos); has_second_succ |= k; if(d_max_buf_ints > 1 && !k) { GETPROCTRANSSTATE(k, THREADBUFFERGROUPPOS(pos,1), 0, pos); has_second_succ |= k; } rule &= ~(1 << pos); } SETNEWSTATE(tgt_state); } int rule_proviso = 0; // while we keep getting new states, store them while (__any(ISNEWSTATE(tgt_state))) { l = k = TMPVAR = bitmask = 0; if(ISNEWSTATE(tgt_state)) { // check for violation of safety property, if required if (d_property == SAFETY) { GETSTATEVECTORSTATE(pos, tgt_state, d_nr_procs-1); if (pos == 1) { // error state found (*d_property_violation) = 1; } } if (!d_check_cycle_proviso) { // Set rule_proviso to 1 to indicate at least one state has been found rule_proviso = 1; } // store tgt_state in cache; if i == d_shared_q_size, state was found, duplicate detected // if i == d_shared_q_size+1, cache is full, immediately store in global hash table if(generate == 1) { TMPVAR = STOREINCACHE(tgt_state, cache, &bitmask); if(TMPVAR >> 2) { rule_proviso |= (TMPVAR >> 1) & 1; } else if (!d_check_cycle_proviso) { SETPORSTATE(&cache[bitmask]); } } else { MARKINCACHE(tgt_state, cache, (THREADGROUPPOR & tmp) == tmp); } l = 1; k = has_second_succ; if(!has_second_succ) { SETOLDSTATE(tgt_state); } } store_cache_overflow_warp(d_q, d_newstate_flags, l && TMPVAR == 8); int c; // Check cycle proviso with the whole warp while(generate && d_check_cycle_proviso && (c = __ballot(l && (TMPVAR >> 2 == 0)))) { int active_lane = __ffs(c) - 1; int cache_index = __shfl(bitmask, active_lane); bj = FIND_WARP((inttype*) &cache[cache_index], d_q); if(LANE == active_lane) { l = 0; if(bj == 0) { rule_proviso = 1; } } } if(k) { // get next successor int rule; for (rule = tmp; rule;) { pos = __ffs(rule) - 1; int 
curr_st; GETSTATEVECTORSTATE(curr_st, tgt_state, pos); int st = 0; for (k = 0; k < d_max_buf_ints; k++) { for (l = 0; l < NR_OF_STATES_IN_TRANSENTRY(pos); l++) { GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,k), l, pos); if (curr_st == (st-1)) { break; } } if (curr_st == (st-1)) { break; } } // Assumption: element has been found (otherwise, 'last' was not a valid successor) // Try to get the next element if (l == NR_OF_STATES_IN_TRANSENTRY(pos) - 1) { if (k >= d_max_buf_ints-1) { st = 0; } else { k++; l = 0; } } else { l++; } // Retrieve next element, insert it in 'tgt_state' if it is not 0, and return result, otherwise continue if (st != 0) { GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,k), l, pos); if (st > 0) { SETSTATEVECTORSTATE(tgt_state, pos, st-1); SETNEWSTATE(tgt_state); break; } } // else, set this process state to first one, and continue to next process GETPROCTRANSSTATE(st, THREADBUFFERGROUPPOS(pos,0), 0, pos); SETSTATEVECTORSTATE(tgt_state, pos, st-1); rule &= ~(1 << pos); } // did we find a successor? if not, set tgt_state to old if (rule == 0) { SETOLDSTATE(tgt_state); } } } for (l = 0; l < d_nr_procs; l++) { // Exchange the sync rules so every thread can update its cluster_trans int sync_rule = __shfl(tmp, GTL((GROUP_ID + l) % d_nr_procs)); int proviso = __shfl(rule_proviso, GTL((GROUP_ID + l) % d_nr_procs)); if(GETBIT(GROUP_ID, sync_rule) && sync_act == act) { cluster_trans |= sync_rule; proviso_satisfied |= proviso; } } } // only active threads should reset 'cont' if (cont && sync_act == act) { cont = 0; } } // END WHILE CONTINUE == 1 if(generate == 1 && THREADINGROUP) { // Choose a cluster for reduction if(!proviso_satisfied) { cluster_trans = cluster_trans & ~(1 << GROUP_ID); } THREADBUFFERGROUPPOS(GROUP_ID,0) = cluster_trans; __syncthreads(); proviso_satisfied = 0; int to_check = cluster_trans; while (to_check) { i = __ffs(to_check) - 1; to_check &= ~(1 << i); int cluster = THREADBUFFERGROUPPOS(i, 0); proviso_satisfied |= GETBIT(i, cluster); to_check |= cluster & ~cluster_trans & ~(1 << i); cluster_trans |= cluster; } __syncthreads(); if(!proviso_satisfied) { THREADBUFFERGROUPPOS(GROUP_ID,0) = 0; } else { THREADBUFFERGROUPPOS(GROUP_ID,0) = cluster_trans; } __syncthreads(); if(GROUP_ID == 0) { int min = d_nr_procs; int cluster = 0xFFFFFFFF >> (INTSIZE - d_nr_procs); for(i = 0; i < d_nr_procs; i++) { if(THREADBUFFERGROUPPOS(i,0) > 0 && __popc(THREADBUFFERGROUPPOS(i,0)) < min) { min = __popc(THREADBUFFERGROUPPOS(i,0)); cluster = THREADBUFFERGROUPPOS(i,0); } } THREADGROUPPOR = cluster; if(cluster < (0xFFFFFFFF >> (INTSIZE - d_nr_procs))) { // printf("Selected cluster %d for POR\n",cluster); } } __syncthreads(); } offset1 = orig_offset1; generate--; } // END while(generate > -1) // have we encountered a deadlock state? 
// we use the shared memory to communicate this to the group leaders if (d_property == DEADLOCK) { if (THREADINGROUP) { if (ISSTATE(src_state)) { THREADBUFFERGROUPPOS(GROUP_ID, 0) = outtrans_enabled; // group leader collects results l = 0; if (GROUP_ID == 0) { for (i = 0; i < d_nr_procs; i++) { l += THREADBUFFERGROUPPOS(i, 0); } if (l == 0) { // deadlock state found (*d_property_violation) = 1; } } } } } int performed_work = OPENTILECOUNT != 0; __syncthreads(); // Reset the open queue tile if (threadIdx.x < OPENTILELEN) { shared[OPENTILEOFFSET+threadIdx.x] = EMPTYVECT32; } if (threadIdx.x == 0) { OPENTILECOUNT = 0; } __syncthreads(); // start scanning the local cache and write results to the global hash table if(performed_work) { copy_cache_to_global(d_q, cache, d_newstate_flags); } __syncthreads(); // Ready to start next iteration, if error has not occurred if (threadIdx.x == 0) { if (CONTINUE == 2) { (*d_contBFS) = 2; ITERATIONS = d_kernel_iters; } else { ITERATIONS++; } CONTINUE = 0; } __syncthreads(); } //Copy the work tile to global mem if (threadIdx.x < OPENTILELEN+LASTSEARCHLEN) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + threadIdx.x] = shared[OPENTILEOFFSET+threadIdx.x]; } if(threadIdx.x == 0) { d_worktiles[(OPENTILELEN+LASTSEARCHLEN+1) * blockIdx.x + OPENTILELEN+LASTSEARCHLEN] = OPENTILECOUNT; } } /** * Host function that prepares data array and passes it to the CUDA kernel. */ int main(int argc, char** argv) { FILE *fp; inttype nr_procs, bits_act, bits_statevector, sv_nints, nr_trans, proc_nrstates, nbits_offset, max_buf_ints, nr_syncbits_offsets, nr_syncbits, nbits_syncbits_offset; inttype *bits_state, *firstbit_statevector, *proc_offsets, *proc_trans, *proc_offsets_start, *syncbits_offsets, *syncbits; inttype contBFS, counted_states; char stmp[BUFFERSIZE], fn[BUFFERSIZE]; // to store constants for closed set hash functions int h[NR_HASH_FUNCTIONS*2]; // size of global hash table size_t q_size = 0; PropertyStatus check_property = NONE; // nr of iterations in single kernel run int kernel_iters = KERNEL_ITERS; int nblocks = NR_OF_BLOCKS; int nthreadsperblock = BLOCK_SIZE; // POR options int apply_por = 0; int use_cycle_proviso = 0; // level of verbosity (1=print level progress) int verbosity = 0; char* dump_file = NULL; // clock to measure time clock_t start, stop; double runtime = 0.0; // Start timer assert((start = clock())!=-1); cudaDeviceProp prop; int nDevices; // GPU side versions of the input inttype *d_bits_state, *d_firstbit_statevector, *d_proc_offsets_start, *d_proc_offsets, *d_proc_trans, *d_syncbits_offsets, *d_syncbits, *d_h; // flag to keep track of progress and whether hash table errors occurred (value==2) inttype *d_contBFS; // flags to track which blocks have new states inttype *d_newstate_flags; // flag to keep track of property verification outcome inttype *d_property_violation; // Integer to store the amount of states counted in the hash table inttype *d_counted_states; // Space to temporarily store work tiles inttype *d_worktiles; // GPU datastructures for calculation inttype *d_q; const char* help_text = "Usage: GPUexplore <model> [OPTIONS]\n" "Run state-space exploration on model (do not include the file extension).\n" "options:\n" " -d Check for deadlocks\n" " -p Check a safety property (should be embedded in the model)\n" " --por Apply partial-order reduction\n" " --cycle-proviso Apply the cycle proviso during partial-order reduction\n" " -k NUM Run NUM iterations per kernel launch (default 1)\n" " -b NUM Run the kernel on NUM blocks 
(default 1)\n" " -t NUM Use NUM threads per block (default 32)\n" " -q NUM Allocate NUM integers for the hash table\n" " --dump FILE Dump the state space to FILE after completing the exploration\n" " -v NUM Change the verbosity:\n" " 0 - minimal output\n" " 1 - print sequence number of each kernel launch\n" " 2 - print number of states in the hash table after each kernel launch\n" " 3 - print state vectors after each kernel launch\n" " -h, --help Show this help message\n"; if (argc == 1) { fprintf(stderr, "ERROR: No input network given!\n"); fprintf(stdout, help_text); exit(1); } else if(!strcmp(argv[1],"--help") || !strcmp(argv[1],"-h") || !strcmp(argv[1],"-?")) { fprintf(stdout, help_text); exit(0); } strcpy(fn, argv[1]); strcat(fn, ".gpf"); int i = 2; while (i < argc) { if (!strcmp(argv[i],"--help") || !strcmp(argv[i],"-h") || !strcmp(argv[i],"-?")) { fprintf(stdout, help_text); exit(0); } else if (!strcmp(argv[i],"-k")) { // if nr. of iterations per kernel run is given, store it kernel_iters = atoi(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-b")) { // store nr of blocks to be used nblocks = atoi(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-t")) { // store nr of threads per block to be used nthreadsperblock = atoi(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-q")) { // store hash table size q_size = atoll(argv[i+1]); i += 2; } else if (!strcmp(argv[i],"-v")) { // store verbosity level verbosity = atoi(argv[i+1]); if (verbosity > 3) { verbosity = 3; } i += 2; } else if (!strcmp(argv[i],"-d")) { // check for deadlocks check_property = DEADLOCK; use_cycle_proviso = 0; i += 1; } else if (!strcmp(argv[i],"-p")) { // check a property check_property = SAFETY; use_cycle_proviso = 1; i += 1; } else if (!strcmp(argv[i],"--por")) { // apply partial-order reduction apply_por = 1; i += 1; } else if (!strcmp(argv[i],"--cycle-proviso")) { // use cycle proviso if (check_property == NONE) { use_cycle_proviso = 1; } i += 1; } else if (!strcmp(argv[i],"--dump")) { dump_file = argv[i+1]; i += 2; } else { fprintf(stderr, "ERROR: unrecognized option %s\n", argv[i]); fprintf(stdout, help_text); exit(1); } } fp = fopen(fn, "r"); if (fp) { // Read the input if (fgets(stmp, BUFFERSIZE, fp) != NULL && check_property == SAFETY) { i = atoi(stmp); fprintf(stdout, "Property to check is "); if (i == 0) { fprintf(stdout, "not "); } fprintf(stdout, "a liveness property\n"); if (i == 1) { check_property = LIVENESS; } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_procs = atoi(stmp); fprintf(stdout, "nr of procs: %d\n", nr_procs); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { bits_act = atoi(stmp); fprintf(stdout, "nr of bits for transition label: %d\n", bits_act); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_nrstates = atoi(stmp); fprintf(stdout, "min. nr. of proc. 
states that fit in 32-bit integer: %d\n", proc_nrstates); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { bits_statevector = atoi(stmp) + apply_por; fprintf(stdout, "number of bits needed for a state vector: %d\n", bits_statevector); } firstbit_statevector = (inttype*) malloc(sizeof(inttype)*(nr_procs+1)); for (int i = 0; i <= nr_procs; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { firstbit_statevector[i] = atoi(stmp); fprintf(stdout, "statevector offset %d: %d\n", i, firstbit_statevector[i]); } } // determine the number of integers needed for a state vector sv_nints = (bits_statevector+31) / INTSIZE; bits_state = (inttype*) malloc(sizeof(inttype)*nr_procs); for (int i = 0; i < nr_procs; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { bits_state[i] = atoi(stmp); fprintf(stdout, "bits for states of process LTS %d: %d\n", i, bits_state[i]); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nbits_offset = atoi(stmp); fprintf(stdout, "size of offset in process LTSs: %d\n", nbits_offset); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { max_buf_ints = atoi(stmp); fprintf(stdout, "maximum label-bounded branching factor: %d\n", max_buf_ints); } proc_offsets_start = (inttype*) malloc(sizeof(inttype)*(nr_procs+1)); for (int i = 0; i <= nr_procs; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_offsets_start[i] = atoi(stmp); } } proc_offsets = (inttype*) malloc(sizeof(inttype)*proc_offsets_start[nr_procs]); for (int i = 0; i < proc_offsets_start[nr_procs]; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_offsets[i] = atoi(stmp); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_trans = atoi(stmp); fprintf(stdout, "total number of transition entries in network: %d\n", nr_trans); } proc_trans = (inttype*) malloc(sizeof(inttype)*nr_trans); for (int i = 0; i < nr_trans; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { proc_trans[i] = atoi(stmp); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nbits_syncbits_offset = atoi(stmp); } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_syncbits_offsets = atoi(stmp); } syncbits_offsets = (inttype*) malloc(sizeof(inttype)*nr_syncbits_offsets); for (int i = 0; i < nr_syncbits_offsets; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { syncbits_offsets[i] = atoi(stmp); } } if (fgets(stmp, BUFFERSIZE, fp) != NULL) { nr_syncbits = atoi(stmp); } syncbits = (inttype*) malloc(sizeof(inttype)*nr_syncbits); for (int i = 0; i < nr_syncbits; i++) { if (fgets(stmp, BUFFERSIZE, fp) != NULL) { syncbits[i] = atoi(stmp); } } } else { fprintf(stderr, "ERROR: input network does not exist!\n"); exit(1); } // Randomly define the closed set hash functions srand(time(NULL)); for (int i = 0; i < NR_HASH_FUNCTIONS*2; i++) { h[i] = rand(); } // continue flags contBFS = 1; // Query the device properties and determine data structure sizes cudaGetDeviceCount(&nDevices); if (nDevices == 0) { fprintf (stderr, "ERROR: No CUDA compatible GPU detected!\n"); exit(1); } cudaGetDeviceProperties(&prop, 0); fprintf (stdout, "global mem: %lu\n", (uint64_t) prop.totalGlobalMem); fprintf (stdout, "shared mem per block: %d\n", (int) prop.sharedMemPerBlock); fprintf (stdout, "shared mem per SM: %d\n", (int) prop.sharedMemPerMultiprocessor); fprintf (stdout, "max. threads per block: %d\n", (int) prop.maxThreadsPerBlock); fprintf (stdout, "max. grid size: %d\n", (int) prop.maxGridSize[0]); fprintf (stdout, "nr. 
of multiprocessors: %d\n", (int) prop.multiProcessorCount); // determine actual nr of blocks nblocks = MAX(1,MIN(prop.maxGridSize[0],nblocks)); // Allocate memory on GPU cudaMallocCount((void **) &d_contBFS, sizeof(inttype)); cudaMallocCount((void **) &d_property_violation, sizeof(inttype)); cudaMallocCount((void **) &d_counted_states, sizeof(inttype)); cudaMallocCount((void **) &d_h, NR_HASH_FUNCTIONS*2*sizeof(inttype)); cudaMallocCount((void **) &d_bits_state, nr_procs*sizeof(inttype)); cudaMallocCount((void **) &d_firstbit_statevector, (nr_procs+1)*sizeof(inttype)); cudaMallocCount((void **) &d_proc_offsets_start, (nr_procs+1)*sizeof(inttype)); cudaMallocCount((void **) &d_proc_offsets, proc_offsets_start[nr_procs]*sizeof(inttype)); cudaMallocCount((void **) &d_proc_trans, nr_trans*sizeof(inttype)); cudaMallocCount((void **) &d_syncbits_offsets, nr_syncbits_offsets*sizeof(inttype)); cudaMallocCount((void **) &d_syncbits, nr_syncbits*sizeof(inttype)); cudaMallocCount((void **) &d_newstate_flags, nblocks*sizeof(inttype)); cudaMallocCount((void **) &d_worktiles, nblocks * (sv_nints*(nthreadsperblock/nr_procs)+nthreadsperblock/WARPSIZE+1)*sizeof(inttype)); // Copy data to GPU CUDA_CHECK_RETURN(cudaMemcpy(d_contBFS, &contBFS, sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_h, h, NR_HASH_FUNCTIONS*2*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_bits_state, bits_state, nr_procs*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_firstbit_statevector, firstbit_statevector, (nr_procs+1)*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_proc_offsets_start, proc_offsets_start, (nr_procs+1)*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_proc_offsets, proc_offsets, proc_offsets_start[nr_procs]*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_proc_trans, proc_trans, nr_trans*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_syncbits_offsets, syncbits_offsets, nr_syncbits_offsets*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemcpy(d_syncbits, syncbits, nr_syncbits*sizeof(inttype), cudaMemcpyHostToDevice)) CUDA_CHECK_RETURN(cudaMemset(d_newstate_flags, 0, nblocks*sizeof(inttype))); CUDA_CHECK_RETURN(cudaMemset(d_worktiles, 0, nblocks * (sv_nints*(nthreadsperblock/nr_procs)+nthreadsperblock/WARPSIZE+1)*sizeof(inttype))); CUDA_CHECK_RETURN(cudaMemset(d_counted_states, 0, sizeof(inttype))); // Bind data to textures cudaBindTexture(NULL, tex_proc_offsets_start, d_proc_offsets_start, (nr_procs+1)*sizeof(inttype)); cudaBindTexture(NULL, tex_proc_offsets, d_proc_offsets, proc_offsets_start[nr_procs]*sizeof(inttype)); cudaBindTexture(NULL, tex_proc_trans, d_proc_trans, nr_trans*sizeof(inttype)); cudaBindTexture(NULL, tex_syncbits_offsets, d_syncbits_offsets, nr_syncbits_offsets*sizeof(inttype)); cudaBindTexture(NULL, tex_syncbits, d_syncbits, nr_syncbits*sizeof(inttype)); size_t available, total; cudaMemGetInfo(&available, &total); if (q_size == 0) { q_size = total / sizeof(inttype); } size_t el_per_Mb = Mb / sizeof(inttype); while(cudaMalloc((void**)&d_q, q_size * sizeof(inttype)) == cudaErrorMemoryAllocation) { q_size -= el_per_Mb; if( q_size < el_per_Mb) { // signal no free memory break; } } fprintf (stdout, "global mem queue size: %lu, number of entries: %lu\n", q_size*sizeof(inttype), (indextype) q_size); inttype shared_q_size = (int) prop.sharedMemPerMultiprocessor / sizeof(inttype) / 2; fprintf (stdout, "shared mem 
queue size: %lu, number of entries: %u\n", shared_q_size*sizeof(inttype), shared_q_size); fprintf (stdout, "nr. of blocks: %d, block size: %d, nr of kernel iterations: %d\n", nblocks, nthreadsperblock, kernel_iters); // copy symbols inttype tablesize = q_size; inttype nrbuckets = tablesize / WARPSIZE; cudaMemcpyToSymbol(d_nrbuckets, &nrbuckets, sizeof(inttype)); cudaMemcpyToSymbol(d_shared_q_size, &shared_q_size, sizeof(inttype)); cudaMemcpyToSymbol(d_nr_procs, &nr_procs, sizeof(inttype)); cudaMemcpyToSymbol(d_max_buf_ints, &max_buf_ints, sizeof(inttype)); cudaMemcpyToSymbol(d_sv_nints, &sv_nints, sizeof(inttype)); cudaMemcpyToSymbol(d_bits_act, &bits_act, sizeof(inttype)); cudaMemcpyToSymbol(d_nbits_offset, &nbits_offset, sizeof(inttype)); cudaMemcpyToSymbol(d_nbits_syncbits_offset, &nbits_syncbits_offset, sizeof(inttype)); cudaMemcpyToSymbol(d_kernel_iters, &kernel_iters, sizeof(inttype)); cudaMemcpyToSymbol(d_property, &check_property, sizeof(inttype)); cudaMemcpyToSymbol(d_apply_por, &apply_por, sizeof(inttype)); cudaMemcpyToSymbol(d_check_cycle_proviso, &use_cycle_proviso, sizeof(inttype)); // init the hash table init_queue<<<nblocks, nthreadsperblock>>>(d_q, q_size); store_initial<<<1,1>>>(d_q, d_h, d_newstate_flags,nthreadsperblock,nblocks); for (int i = 0; i < 2*NR_HASH_FUNCTIONS; i++) { fprintf (stdout, "hash constant %d: %d\n", i, h[i]); } FIRSTHASHHOST(i); fprintf (stdout, "hash of initial state: %d\n", i); inttype zero = 0; inttype *q_test = (inttype*) malloc(sizeof(inttype)*tablesize); int j = 0; inttype scan = 0; CUDA_CHECK_RETURN(cudaMemcpy(d_property_violation, &zero, sizeof(inttype), cudaMemcpyHostToDevice)) inttype property_violation = 0; clock_t exploration_start; assert((exploration_start = clock())!=-1); while (contBFS == 1) { CUDA_CHECK_RETURN(cudaMemcpy(d_contBFS, &zero, sizeof(inttype), cudaMemcpyHostToDevice)) if(apply_por) { gather_por<<<nblocks, nthreadsperblock, shared_q_size*sizeof(inttype)>>>(d_q, d_h, d_bits_state, d_firstbit_statevector, d_proc_offsets_start, d_proc_offsets, d_proc_trans, d_syncbits_offsets, d_syncbits, d_contBFS, d_property_violation, d_newstate_flags, d_worktiles, scan); } else { gather<<<nblocks, nthreadsperblock, shared_q_size*sizeof(inttype)>>>(d_q, d_h, d_bits_state, d_firstbit_statevector, d_contBFS, d_property_violation, d_newstate_flags, d_worktiles, scan); } // copy progress result //CUDA_CHECK_RETURN(cudaGetLastError()); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaMemcpy(&contBFS, d_contBFS, sizeof(inttype), cudaMemcpyDeviceToHost)) if (check_property > 0) { CUDA_CHECK_RETURN(cudaMemcpy(&property_violation, d_property_violation, sizeof(inttype), cudaMemcpyDeviceToHost)) if (property_violation == 1) { contBFS = 0; } } if (verbosity > 0) { if (verbosity == 1) { printf ("%d\n", j++); } else if (verbosity == 2) { cudaMemcpy(q_test, d_q, tablesize*sizeof(inttype), cudaMemcpyDeviceToHost); count_local_queue(q_test, tablesize, firstbit_statevector, nr_procs, sv_nints); } else if (verbosity == 3) { cudaMemcpy(q_test, d_q, tablesize*sizeof(inttype), cudaMemcpyDeviceToHost); print_local_queue(stdout, q_test, tablesize, firstbit_statevector, nr_procs, sv_nints, apply_por); } } scan = 1; } // determine runtime stop = clock(); runtime = (double) (stop-start)/CLOCKS_PER_SEC; fprintf (stdout, "Run time: %f\n", runtime); runtime = (double) (stop-exploration_start)/CLOCKS_PER_SEC; fprintf(stdout, "Exploration time %f\n", runtime); if (property_violation == 1) { switch (check_property) { case DEADLOCK: printf ("deadlock 
detected!\n"); break; case SAFETY: printf ("safety property violation detected!\n"); break; case LIVENESS: printf ("liveness property violation detected!\n"); break; } } // report error if required if (contBFS == 2) { fprintf (stderr, "ERROR: problem with hash table\n"); } CUDA_CHECK_RETURN(cudaMemset(d_counted_states, 0, sizeof(inttype))); count_states<<<((int) prop.multiProcessorCount)*8, 512, 1>>>(d_q, d_counted_states); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaMemcpy(&counted_states, d_counted_states, sizeof(inttype), cudaMemcpyDeviceToHost)); fprintf (stdout, "nr. of states in hash table: %d\n", counted_states); // Debugging functionality: print states to file if(dump_file) { FILE* fout; if((fout = fopen(dump_file, "w")) != NULL) { fprintf(stdout, "Dumping state space to file...\n"); cudaMemcpy(q_test, d_q, tablesize*sizeof(inttype), cudaMemcpyDeviceToHost); print_local_queue(fout, q_test, tablesize, firstbit_statevector, nr_procs, sv_nints, apply_por); fclose(fout); } else { fprintf(stderr, "Could not open file to dump the state space\n"); } } return 0; }
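// Aside on the gather/gather_por kernels above: threads that find unexplored states claim
// slots in the shared work tile by atomically bumping a counter (OPENTILECOUNT) and only
// writing when the reserved slot still fits. The stand-alone sketch below shows that claiming
// idiom in isolation; all names (claim_demo, TILE_LEN, ...) are illustrative assumptions and
// this is not GPUexplore code.
#include <cstdio>
#include <cuda_runtime.h>

#define TILE_LEN 8

__global__ void claim_demo(const int* is_new, int* tile, int* claimed, int n)
{
    __shared__ int tile_count;
    if (threadIdx.x == 0) tile_count = 0;
    __syncthreads();

    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n && is_new[idx]) {
        int slot = atomicAdd(&tile_count, 1);      // reserve a slot, like OPENTILECOUNT
        if (slot < TILE_LEN) {
            tile[slot] = idx;                      // the reservation fits: record the state
        }
        // entries that do not fit stay "new" and would be picked up by a later scan
    }
    __syncthreads();

    if (threadIdx.x == 0) {
        *claimed = tile_count < TILE_LEN ? tile_count : TILE_LEN;
    }
}

int main()
{
    int h_new[32], h_tile[TILE_LEN], h_claimed = 0;
    for (int i = 0; i < 32; i++) h_new[i] = (i % 3 == 0);   // every third entry is "new"

    int *d_new, *d_tile, *d_claimed;
    cudaMalloc(&d_new, sizeof(h_new));
    cudaMalloc(&d_tile, sizeof(h_tile));
    cudaMalloc(&d_claimed, sizeof(int));
    cudaMemcpy(d_new, h_new, sizeof(h_new), cudaMemcpyHostToDevice);

    claim_demo<<<1, 32>>>(d_new, d_tile, d_claimed, 32);

    cudaMemcpy(&h_claimed, d_claimed, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_tile, d_tile, sizeof(h_tile), cudaMemcpyDeviceToHost);
    printf("claimed %d tile slots\n", h_claimed);

    cudaFree(d_new); cudaFree(d_tile); cudaFree(d_claimed);
    return 0;
}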
177061c7f23c3d5b1b16ffabdaa02852dc76203e.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "kernel.hip" #include "support.cu" int main (int argc, char *argv[]) { //set standard seed srand(217); Timer timer; hipError_t cuda_ret; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *A_h, *B_h, *C_h; float *A_d, *B_d, *C_d; size_t A_sz, B_sz, C_sz; unsigned VecSize; dim3 dim_grid, dim_block; if (argc == 1) { VecSize = 1000; } else if (argc == 2) { VecSize = atoi(argv[1]); } else { printf("\nOh no!\nUsage: ./vecAdd <Size>"); exit(0); } A_sz = VecSize; B_sz = VecSize; C_sz = VecSize; A_h = (float*) malloc( sizeof(float)*A_sz ); for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; } B_h = (float*) malloc( sizeof(float)*B_sz ); for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; } C_h = (float*) malloc( sizeof(float)*C_sz ); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf("Size Of vector: %u x %u\n ", VecSize); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); hipMalloc((void**) &A_d, sizeof(float) * A_sz); hipMalloc((void**) &B_d, sizeof(float) * B_sz); hipMalloc((void**) &C_d, sizeof(float) * C_sz); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); hipMemcpy(A_d, A_h, sizeof(float) * A_sz, hipMemcpyHostToDevice); hipMemcpy(B_d, B_h, sizeof(float) * B_sz, hipMemcpyHostToDevice); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel --------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); hipMemcpy(C_h, C_d, sizeof(float) * C_sz, hipMemcpyDeviceToHost); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, VecSize); // Free memory ------------------------------------------------------------ free(A_h); free(B_h); free(C_h); hipFree(A_d); hipFree(B_d); hipFree(C_d); return 0; }
177061c7f23c3d5b1b16ffabdaa02852dc76203e.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "kernel.cu" #include "support.cu" int main (int argc, char *argv[]) { //set standard seed srand(217); Timer timer; cudaError_t cuda_ret; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *A_h, *B_h, *C_h; float *A_d, *B_d, *C_d; size_t A_sz, B_sz, C_sz; unsigned VecSize; dim3 dim_grid, dim_block; if (argc == 1) { VecSize = 1000; } else if (argc == 2) { VecSize = atoi(argv[1]); } else { printf("\nOh no!\nUsage: ./vecAdd <Size>"); exit(0); } A_sz = VecSize; B_sz = VecSize; C_sz = VecSize; A_h = (float*) malloc( sizeof(float)*A_sz ); for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; } B_h = (float*) malloc( sizeof(float)*B_sz ); for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; } C_h = (float*) malloc( sizeof(float)*C_sz ); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf("Size Of vector: %u x %u\n ", VecSize); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); cudaMalloc((void**) &A_d, sizeof(float) * A_sz); cudaMalloc((void**) &B_d, sizeof(float) * B_sz); cudaMalloc((void**) &C_d, sizeof(float) * C_sz); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); cudaMemcpy(A_d, A_h, sizeof(float) * A_sz, cudaMemcpyHostToDevice); cudaMemcpy(B_d, B_h, sizeof(float) * B_sz, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel --------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); basicVecAdd(A_d, B_d, C_d, VecSize); //In kernel.cu cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cudaMemcpy(C_h, C_d, sizeof(float) * C_sz, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, VecSize); // Free memory ------------------------------------------------------------ free(A_h); free(B_h); free(C_h); cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); return 0; }
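// Aside on the vecAdd driver above: it relies on basicVecAdd() from kernel.cu, which is not
// part of this record. The sketch below shows what such a wrapper typically contains; the
// kernel name, block size and signature are assumptions, not the missing file's actual code.
#include <cuda_runtime.h>

__global__ void vecAddKernel(const float* A, const float* B, float* C, unsigned n)
{
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)                                           // guard: the grid may cover more than n elements
        C[i] = A[i] + B[i];
}

void basicVecAdd(float* A_d, float* B_d, float* C_d, unsigned n)
{
    const unsigned BLOCK_SIZE = 256;
    unsigned grid = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;   // ceiling division
    vecAddKernel<<<grid, BLOCK_SIZE>>>(A_d, B_d, C_d, n);
}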
f21ef3f0c02d10968452630ad0c4ae50accaa477.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * http://github.com/dusty-nv
 */

#include "cudaRGB.h"

//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uchar3* srcImage, float4* dstImage, uint32_t width, uint32_t height)
{
    int x, y, pixel;

    x = (blockIdx.x * blockDim.x) + threadIdx.x;
    y = (blockIdx.y * blockDim.y) + threadIdx.y;
    pixel = y * width + x;

    if (x >= width)
        return;

    if (y >= height)
        return;

    //printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);

    const float s = 1.0f;
    const uchar3 px = srcImage[pixel];

    dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s);
}

hipError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height )
{
    if( !srcDev || !destDev )
        return hipErrorInvalidDevicePointer;

    const dim3 blockDim(8,8,1);
    const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);

    hipLaunchKernelGGL(( RGBToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height );

    //hipDeviceSynchronize();

    return CUDA(hipGetLastError());
}
f21ef3f0c02d10968452630ad0c4ae50accaa477.cu
/*
 * http://github.com/dusty-nv
 */

#include "cudaRGB.h"

//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uchar3* srcImage, float4* dstImage, uint32_t width, uint32_t height)
{
    int x, y, pixel;

    x = (blockIdx.x * blockDim.x) + threadIdx.x;
    y = (blockIdx.y * blockDim.y) + threadIdx.y;
    pixel = y * width + x;

    if (x >= width)
        return;

    if (y >= height)
        return;

    //printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);

    const float s = 1.0f;
    const uchar3 px = srcImage[pixel];

    dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s);
}

cudaError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height )
{
    if( !srcDev || !destDev )
        return cudaErrorInvalidDevicePointer;

    const dim3 blockDim(8,8,1);
    const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);

    RGBToRGBAf<<<gridDim, blockDim>>>( srcDev, destDev, width, height );

    //cudaThreadSynchronize();

    return CUDA(cudaGetLastError());
}
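// Aside on cudaRGBToRGBAf() above: iDivUp() comes from cudaRGB.h, which is not included in
// this record; it is presumably the usual ceiling division used to size the grid. A minimal
// stand-alone illustration of that helper and the resulting launch geometry follows; the
// image size is an arbitrary example.
#include <cstdio>
#include <cuda_runtime.h>

static inline int iDivUp(int a, int b) { return (a + b - 1) / b; }

int main()
{
    const int width = 1920, height = 1080;
    const dim3 blockDim(8, 8, 1);
    const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y), 1);

    // 240 x 135 blocks: every pixel is covered even if width/height were not multiples of 8,
    // which is why the kernel checks x >= width and y >= height before writing.
    printf("grid = %u x %u blocks of %u x %u threads\n", gridDim.x, gridDim.y, blockDim.x, blockDim.y);
    return 0;
}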
b9b9e031b39a2099637ebbdbc3eeb55cf653fef4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "uchar4tofloat4.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; uchar4 *inputImage = NULL; hipMalloc(&inputImage, XSIZE*YSIZE); float4 *outputImage = NULL; hipMalloc(&outputImage, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( uchar4tofloat4), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( uchar4tofloat4), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( uchar4tofloat4), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b9b9e031b39a2099637ebbdbc3eeb55cf653fef4.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "uchar4tofloat4.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; uchar4 *inputImage = NULL; cudaMalloc(&inputImage, XSIZE*YSIZE); float4 *outputImage = NULL; cudaMalloc(&outputImage, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); uchar4tofloat4<<<gridBlock,threadBlock>>>(inputImage,outputImage,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { uchar4tofloat4<<<gridBlock,threadBlock>>>(inputImage,outputImage,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { uchar4tofloat4<<<gridBlock,threadBlock>>>(inputImage,outputImage,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
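// Aside on the generated benchmark above: kernel launches are asynchronous, so a host-side
// steady_clock pair mostly measures launch overhead unless the device is synchronized before
// the end timestamp is taken. CUDA events are a common alternative; the sketch below is a
// generic illustration (the busy kernel and sizes are made up), not part of the benchmark.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy(float* x)
{
    x[threadIdx.x] += 1.0f;      // trivial stand-in for the kernel being measured
}

int main()
{
    float* d_x;
    cudaMalloc(&d_x, 256 * sizeof(float));
    cudaMemset(d_x, 0, 256 * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // warm-up launches, as in the harness above
    for (int i = 0; i < 10; i++) busy<<<1, 256>>>(d_x);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; i++) busy<<<1, 256>>>(d_x);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait until all timed work has actually finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("1000 launches took %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}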
3c8441bf52d2087d69f6d9b68511374dbd279fa6.hip
// !!! This is a file automatically generated by hipify!!!
#include "SequenceVisitor.cuh"
#include "EstimateInputSize.cuh"

template<>
void SequenceVisitor::set_arguments_size<velo_estimate_input_size_t>(
  velo_estimate_input_size_t::arguments_t arguments,
  const RuntimeOptions& runtime_options,
  const Constants& constants,
  const HostBuffers& host_buffers)
{
  if (logger::ll.verbosityLevel >= logger::debug) {
    debug_cout << "# of events = " << host_buffers.host_number_of_selected_events[0] << std::endl;
  }

  arguments.set_size<dev_velo_raw_input>(runtime_options.host_velopix_events_size);
  arguments.set_size<dev_velo_raw_input_offsets>(runtime_options.host_velopix_event_offsets_size);
  arguments.set_size<dev_estimated_input_size>(
    host_buffers.host_number_of_selected_events[0] * Velo::Constants::n_modules + 1);
  arguments.set_size<dev_module_cluster_num>(
    host_buffers.host_number_of_selected_events[0] * Velo::Constants::n_modules);
  arguments.set_size<dev_module_candidate_num>(host_buffers.host_number_of_selected_events[0]);
  arguments.set_size<dev_cluster_candidates>(
    host_buffers.host_number_of_selected_events[0] * VeloClustering::max_candidates_event);
  arguments.set_size<dev_event_order>(host_buffers.host_number_of_selected_events[0]);
}

template<>
void SequenceVisitor::visit<velo_estimate_input_size_t>(
  velo_estimate_input_size_t& state,
  const velo_estimate_input_size_t::arguments_t& arguments,
  const RuntimeOptions& runtime_options,
  const Constants& constants,
  HostBuffers& host_buffers,
  hipStream_t& cuda_stream,
  hipEvent_t& cuda_generic_event)
{
  // Setup opts and arguments for kernel call
  state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(32, 26), cuda_stream);
  state.set_arguments(
    arguments.offset<dev_velo_raw_input>(),
    arguments.offset<dev_velo_raw_input_offsets>(),
    arguments.offset<dev_estimated_input_size>(),
    arguments.offset<dev_module_cluster_num>(),
    arguments.offset<dev_module_candidate_num>(),
    arguments.offset<dev_cluster_candidates>(),
    arguments.offset<dev_event_list>(),
    arguments.offset<dev_event_order>(),
    constants.dev_velo_candidate_ks);

  // Kernel call
  state.invoke();
}
3c8441bf52d2087d69f6d9b68511374dbd279fa6.cu
#include "SequenceVisitor.cuh" #include "EstimateInputSize.cuh" template<> void SequenceVisitor::set_arguments_size<velo_estimate_input_size_t>( velo_estimate_input_size_t::arguments_t arguments, const RuntimeOptions& runtime_options, const Constants& constants, const HostBuffers& host_buffers) { if (logger::ll.verbosityLevel >= logger::debug) { debug_cout << "# of events = " << host_buffers.host_number_of_selected_events[0] << std::endl; } arguments.set_size<dev_velo_raw_input>(runtime_options.host_velopix_events_size); arguments.set_size<dev_velo_raw_input_offsets>(runtime_options.host_velopix_event_offsets_size); arguments.set_size<dev_estimated_input_size>( host_buffers.host_number_of_selected_events[0] * Velo::Constants::n_modules + 1); arguments.set_size<dev_module_cluster_num>( host_buffers.host_number_of_selected_events[0] * Velo::Constants::n_modules); arguments.set_size<dev_module_candidate_num>(host_buffers.host_number_of_selected_events[0]); arguments.set_size<dev_cluster_candidates>( host_buffers.host_number_of_selected_events[0] * VeloClustering::max_candidates_event); arguments.set_size<dev_event_order>(host_buffers.host_number_of_selected_events[0]); } template<> void SequenceVisitor::visit<velo_estimate_input_size_t>( velo_estimate_input_size_t& state, const velo_estimate_input_size_t::arguments_t& arguments, const RuntimeOptions& runtime_options, const Constants& constants, HostBuffers& host_buffers, cudaStream_t& cuda_stream, cudaEvent_t& cuda_generic_event) { // Setup opts and arguments for kernel call state.set_opts(dim3(host_buffers.host_number_of_selected_events[0]), dim3(32, 26), cuda_stream); state.set_arguments( arguments.offset<dev_velo_raw_input>(), arguments.offset<dev_velo_raw_input_offsets>(), arguments.offset<dev_estimated_input_size>(), arguments.offset<dev_module_cluster_num>(), arguments.offset<dev_module_candidate_num>(), arguments.offset<dev_cluster_candidates>(), arguments.offset<dev_event_list>(), arguments.offset<dev_event_order>(), constants.dev_velo_candidate_ks); // Kernel call state.invoke(); }
4c0133c6d1ca3a8ab8a7d2f1fee6f902a339fc82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_fp16.h" #define _VOLATILE_ #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define load(x) __ldcg(x) #define store(x, value) __stcs(x, value) #define isnan(x) ( x != x ) #define N_WARPS _TPB_/32 #ifndef INFINITY #define INFINITY __int_as_float(0x7f800000) #endif typedef long long ll_t; typedef struct __builtin_align__(8) { float value; int index; } pair; #if (__CUDA_ARCH__ < 700) __device__ void __nanosleep(unsigned int ns){ clock_t start_clock = clock(); clock_t clock_offset = 0; while (clock_offset < ns) { clock_offset = clock() - start_clock; } } #endif __device__ __forceinline__ unsigned int bfe( unsigned int source, unsigned int bitIndex ) { unsigned int bit; asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1)); return bit; } __device__ __forceinline__ void warp_comparator( float &value, int &index, const int stride, const int direction ){ const float otherValue = __shfl_xor_sync(0xFFFFFFFF, value, stride); const int otherIndex = __shfl_xor_sync(0xFFFFFFFF, index, stride); // bool condition = value < otherValue == direction; // index = condition ? otherIndex : index; // value = condition ? otherValue : value; if (value < otherValue == direction){ index = otherIndex; value = otherValue; } } __device__ __forceinline__ void thread_comparator( float &value, int &index, float otherValue, int otherIndex, const int direction ){ bool condition = value > otherValue == direction; if (condition){ value = otherValue; index = otherIndex; } } __device__ __forceinline__ void bitonic_sort_2( float &value, int &index, int laneID ){ warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_4( float &value, int &index, int laneID ){ bitonic_sort_2(value, index, laneID); unsigned int bfe_2 = bfe(laneID, 2); warp_comparator(value, index, 2, bfe_2 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_2 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_8( float &value, int &index, int laneID ){ bitonic_sort_4(value, index, laneID); unsigned int bfe_3 = bfe(laneID, 3); warp_comparator(value, index, 4, bfe_3 ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe_3 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_3 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_16( float &value, int &index, int laneID ){ bitonic_sort_8(value, index, laneID); unsigned int bfe_4 = bfe(laneID, 4); warp_comparator(value, index, 8, bfe_4 ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe_4 ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe_4 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_4 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_32( float &value, int &index, int laneID ){ bitonic_sort_16(value, index, laneID); unsigned int bfe_5 = bfe(laneID, 5); warp_comparator(value, index, 16, bfe_5 ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe_5 ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe_5 ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe_5 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_5 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_1( float &value, int &index, float otherValue, int otherIndex ) { thread_comparator(value, index, otherValue, otherIndex, 0); } __device__ __forceinline__ void bitonic_sort_global_2( float &value, int &index, 
float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_4( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_8( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_16( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_32( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ bool is_queue_full( int queueFront, int queueRear ){ // return ((queueFront - queueRear) == 1 || (queueFront == 0 && queueRear == _QCAP_ - 1)); return (queueRear + 1) % _QCAP_ == queueFront; } __device__ __forceinline__ bool is_queue_empty( int queueFront, int queueRear ){ return queueFront == -1; } __device__ __forceinline__ void push_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], pair newPair, int &queueFront, int &queueRear, int wx, int wy ) { // const int tid = threadIdx.x; if (is_queue_full(queueFront, queueRear)){ return; } else if (is_queue_empty(queueFront, queueRear)){ queueFront = 0; queueRear = 0; queueSmem[wy][wx][queueRear] = newPair; } else { queueRear = (queueRear + 1) % _QCAP_; queueSmem[wy][wx][queueRear] = newPair; } } __device__ __forceinline__ void pop_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], pair &oldPair, int &queueFront, int &queueRear, int wx, int wy ) { if (is_queue_empty(queueFront, queueRear)){ return; } else if (queueFront == queueRear){ pair poppedPair = queueSmem[wy][wx][queueFront]; oldPair.value = poppedPair.value; oldPair.index = poppedPair.index; queueFront = -1; queueRear = -1; } else { pair poppedPair = queueSmem[wy][wx][queueFront]; oldPair.value = poppedPair.value; oldPair.index = poppedPair.index; //oldPair = queueSmem[tid][queueFront]; queueFront = (queueFront + 1) % _QCAP_; } } __device__ __forceinline__ void push_pop_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], pair newPair, pair &oldPair, int &queueFront, int &queueRear, int wx, int wy ) { const int tid = threadIdx.x; if (is_queue_empty(queueFront, queueRear)){ return; } else if (queueFront == queueRear){ oldPair = queueSmem[wy][wx][queueFront]; queueSmem[wy][wx][queueRear] = newPair; } else { oldPair = queueSmem[wy][wx][queueFront]; queueFront = (queueFront + 1) % _QCAP_; queueRear = (queueRear + 1) % _QCAP_; 
queueSmem[wy][wx][queueRear] = newPair; } } __device__ __forceinline__ void init_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], const int wx, const int wy ){ pair emptyPair; emptyPair.value = -INFINITY; emptyPair.index = -1; #pragma unroll for (int i=0; i<_QCAP_; i++){ queueSmem[wy][wx][i] = emptyPair; } } __device__ __forceinline__ void sort_( float &finalValue, int &finalIndex, float value, int index, int K ){ int tid = threadIdx.x; int wx; // int wx = tid % 32; // int wy = tid / 32; // #if _TPB_ == 32 // bitonic_sort_32(value, index, wx); switch (K){ case 1: bitonic_sort_global_1( finalValue, finalIndex, value, index); break; case 2: wx = tid % 2; bitonic_sort_2(value, index, wx); bitonic_sort_global_2( finalValue, finalIndex, value, index, wx); break; case 4: wx = tid % 4; bitonic_sort_4(value, index, wx); bitonic_sort_global_4( finalValue, finalIndex, value, index, wx); break; case 8: wx = tid % 8; bitonic_sort_8(value, index, wx); bitonic_sort_global_8( finalValue, finalIndex, value, index, wx); break; case 16: wx = tid % 16; bitonic_sort_16(value, index, wx); bitonic_sort_global_16( finalValue, finalIndex, value, index, wx); break; case 32: wx = tid % 32; bitonic_sort_32(value, index, wx); bitonic_sort_global_32( finalValue, finalIndex, value, index, wx); break; } } __device__ __forceinline__ void sort( float &finalValue, int &finalIndex, float value, int index, int K ){ int tid = threadIdx.x; int wx = tid % 32; int wy = tid / 32; // #if _TPB_ == 32 bitonic_sort_32(value, index, wx); switch (K){ case 1: bitonic_sort_global_1( finalValue, finalIndex, value, index); break; case 2: bitonic_sort_global_2( finalValue, finalIndex, value, index, wx); break; case 4: bitonic_sort_global_4( finalValue, finalIndex, value, index, wx); break; case 8: bitonic_sort_global_8( finalValue, finalIndex, value, index, wx); break; case 16: bitonic_sort_global_16( finalValue, finalIndex, value, index, wx); break; case 32: bitonic_sort_global_32( finalValue, finalIndex, value, index, wx); break; } } __device__ __forceinline__ void prefetch( const float* mat, const int i, const int iM, const int wx, const int N ){ int iN = ( i * 32 + wx ); if (likely(iN < N)){ const float* address = mat + (iM) * N+ iN; // asm("prefetch_batched.global.L1 [%0];" :: "l"(address) ); asm("prefetchu.L1 [%0];" :: "l"(address) ); } } __device__ __forceinline__ void prefetch_batched( const float* mat, const int i, const int iM, const int wx, const int N ){ #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ const float* address = mat + (iM) * N+ iN; // asm("prefetch_batched.global.L1 [%0];" :: "l"(address) ); asm("prefetchu.L1 [%0];" :: "l"(address) ); } } } __device__ __forceinline__ void prefetch_batched_fp16( const __half* mat, const int i, const int iM, const int wx, const int N ){ #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ const __half* address = mat + (iM) * N+ iN; // asm("prefetch_batched.global.L1 [%0];" :: "l"(address) ); asm("prefetchu.L1 [%0];" :: "l"(address) ); } } } __device__ __forceinline__ void load_buffer_batched( const float* mat, pair buffer[_TN_], const int i, const int iM, const int wx, const int N ){ const int tid = threadIdx.x; #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ buffer[j].index = iN; buffer[j].value = mat[ (iM) * N + iN ]; } else { buffer[j].value = -INFINITY; buffer[j].index = -1; } } } __device__ __forceinline__ 
void load_buffer_batched_fp16( const __half* mat, pair buffer[_TN_], const int i, const int iM, const int wx, const int N ){ const int tid = threadIdx.x; #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ buffer[j].index = iN; buffer[j].value = __half2float(mat[ (iM) * N + iN ]); } else { buffer[j].value = -INFINITY; buffer[j].index = -1; } } } __device__ __forceinline__ void arr2arr( pair src[_TN_], pair tar[_TN_] ){ #pragma unroll for (int i=0; i<_TN_; i++){ tar[i] = src[i]; } } extern "C" __global__ void top1_select( const float* __restrict__ mat, float* __restrict__ gValue, ll_t* __restrict__ gIndex, int M, int N, int K ){ const int tid = threadIdx.x; const int wx = tid % 32; const int wy = tid / 32; // const ll_t iM = blockIdx.x; const int mStart = blockIdx.x * N_WARPS; const int iM = mStart + wy; pair finalPair; finalPair.value = -INFINITY; finalPair.index = -1; pair working[_TN_]; prefetch_batched(mat, 0, iM, wx, N); const int nIter = (N + 32 * _TN_ - 1) / (32 * _TN_); for (int i=0; i < nIter; i++){ if (i + 1 < nIter){ prefetch_batched(mat, i + 1, iM, wx, N); } load_buffer_batched(mat, working, i, iM, wx, N); #pragma unroll for (int j=0; j < _TN_; j++){ pair newPair = working[j]; if (newPair.value > finalPair.value){ finalPair = newPair; } } } // sort( // finalPair.value, finalPair.index, // finalPair.value, finalPair.index, // K // ); bitonic_sort_32( finalPair.value, finalPair.index, wx ); // last K threads write their finalValue and finalIndex to gValue and gIndex if (32 - K <= wx){ const int writeAddress = (iM * K) + wx - (32 - K); gValue[writeAddress] = finalPair.value; gIndex[writeAddress] = ll_t(finalPair.index); } } extern "C" __global__ void top1_select_fp16( const __half* __restrict__ mat, __half* __restrict__ gValue, ll_t* __restrict__ gIndex, int M, int N, int K ){ const int tid = threadIdx.x; const int wx = tid % 32; const int wy = tid / 32; // const ll_t iM = blockIdx.x; const int mStart = blockIdx.x * N_WARPS; const int iM = mStart + wy; pair finalPair; finalPair.value = -INFINITY; finalPair.index = -1; pair working[_TN_]; prefetch_batched_fp16(mat, 0, iM, wx, N); const int nIter = (N + 32 * _TN_ - 1) / (32 * _TN_); for (int i=0; i < nIter; i++){ if (i + 1 < nIter){ prefetch_batched_fp16(mat, i + 1, iM, wx, N); } load_buffer_batched_fp16(mat, working, i, iM, wx, N); #pragma unroll for (int j=0; j < _TN_; j++){ pair newPair = working[j]; if (newPair.value > finalPair.value){ finalPair = newPair; } } } // sort( // finalPair.value, finalPair.index, // finalPair.value, finalPair.index, // K // ); bitonic_sort_32( finalPair.value, finalPair.index, wx ); // last K threads write their finalValue and finalIndex to gValue and gIndex if (32 - K <= wx){ const int writeAddress = (iM * K) + wx - (32 - K); gValue[writeAddress] = __float2half(finalPair.value); gIndex[writeAddress] = ll_t(finalPair.index); } }
4c0133c6d1ca3a8ab8a7d2f1fee6f902a339fc82.cu
#include "cuda_fp16.h" #define _VOLATILE_ #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #define load(x) __ldcg(x) #define store(x, value) __stcs(x, value) #define isnan(x) ( x != x ) #define N_WARPS _TPB_/32 #ifndef INFINITY #define INFINITY __int_as_float(0x7f800000) #endif typedef long long ll_t; typedef struct __builtin_align__(8) { float value; int index; } pair; #if (__CUDA_ARCH__ < 700) __device__ void __nanosleep(unsigned int ns){ clock_t start_clock = clock(); clock_t clock_offset = 0; while (clock_offset < ns) { clock_offset = clock() - start_clock; } } #endif __device__ __forceinline__ unsigned int bfe( unsigned int source, unsigned int bitIndex ) { unsigned int bit; asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1)); return bit; } __device__ __forceinline__ void warp_comparator( float &value, int &index, const int stride, const int direction ){ const float otherValue = __shfl_xor_sync(0xFFFFFFFF, value, stride); const int otherIndex = __shfl_xor_sync(0xFFFFFFFF, index, stride); // bool condition = value < otherValue == direction; // index = condition ? otherIndex : index; // value = condition ? otherValue : value; if (value < otherValue == direction){ index = otherIndex; value = otherValue; } } __device__ __forceinline__ void thread_comparator( float &value, int &index, float otherValue, int otherIndex, const int direction ){ bool condition = value > otherValue == direction; if (condition){ value = otherValue; index = otherIndex; } } __device__ __forceinline__ void bitonic_sort_2( float &value, int &index, int laneID ){ warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_4( float &value, int &index, int laneID ){ bitonic_sort_2(value, index, laneID); unsigned int bfe_2 = bfe(laneID, 2); warp_comparator(value, index, 2, bfe_2 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_2 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_8( float &value, int &index, int laneID ){ bitonic_sort_4(value, index, laneID); unsigned int bfe_3 = bfe(laneID, 3); warp_comparator(value, index, 4, bfe_3 ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe_3 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_3 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_16( float &value, int &index, int laneID ){ bitonic_sort_8(value, index, laneID); unsigned int bfe_4 = bfe(laneID, 4); warp_comparator(value, index, 8, bfe_4 ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe_4 ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe_4 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_4 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_32( float &value, int &index, int laneID ){ bitonic_sort_16(value, index, laneID); unsigned int bfe_5 = bfe(laneID, 5); warp_comparator(value, index, 16, bfe_5 ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe_5 ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe_5 ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe_5 ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe_5 ^ bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_1( float &value, int &index, float otherValue, int otherIndex ) { thread_comparator(value, index, otherValue, otherIndex, 0); } __device__ __forceinline__ void bitonic_sort_global_2( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, 
otherIndex, 0); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_4( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_8( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_16( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ void bitonic_sort_global_32( float &value, int &index, float otherValue, int otherIndex, int laneID ) { thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } __device__ __forceinline__ bool is_queue_full( int queueFront, int queueRear ){ // return ((queueFront - queueRear) == 1 || (queueFront == 0 && queueRear == _QCAP_ - 1)); return (queueRear + 1) % _QCAP_ == queueFront; } __device__ __forceinline__ bool is_queue_empty( int queueFront, int queueRear ){ return queueFront == -1; } __device__ __forceinline__ void push_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], pair newPair, int &queueFront, int &queueRear, int wx, int wy ) { // const int tid = threadIdx.x; if (is_queue_full(queueFront, queueRear)){ return; } else if (is_queue_empty(queueFront, queueRear)){ queueFront = 0; queueRear = 0; queueSmem[wy][wx][queueRear] = newPair; } else { queueRear = (queueRear + 1) % _QCAP_; queueSmem[wy][wx][queueRear] = newPair; } } __device__ __forceinline__ void pop_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], pair &oldPair, int &queueFront, int &queueRear, int wx, int wy ) { if (is_queue_empty(queueFront, queueRear)){ return; } else if (queueFront == queueRear){ pair poppedPair = queueSmem[wy][wx][queueFront]; oldPair.value = poppedPair.value; oldPair.index = poppedPair.index; queueFront = -1; queueRear = -1; } else { pair poppedPair = queueSmem[wy][wx][queueFront]; oldPair.value = poppedPair.value; oldPair.index = poppedPair.index; //oldPair = queueSmem[tid][queueFront]; queueFront = (queueFront + 1) % _QCAP_; } } __device__ __forceinline__ void push_pop_queue( _VOLATILE_ pair queueSmem[N_WARPS][32][_QCAP_], pair newPair, pair &oldPair, int &queueFront, int &queueRear, int wx, int wy ) { const int tid = threadIdx.x; if (is_queue_empty(queueFront, queueRear)){ return; } else if (queueFront == queueRear){ oldPair = queueSmem[wy][wx][queueFront]; queueSmem[wy][wx][queueRear] = newPair; } else { oldPair = queueSmem[wy][wx][queueFront]; queueFront = (queueFront + 1) % _QCAP_; queueRear = (queueRear + 1) % _QCAP_; queueSmem[wy][wx][queueRear] = newPair; } } __device__ __forceinline__ void init_queue( _VOLATILE_ pair 
queueSmem[N_WARPS][32][_QCAP_], const int wx, const int wy ){ pair emptyPair; emptyPair.value = -INFINITY; emptyPair.index = -1; #pragma unroll for (int i=0; i<_QCAP_; i++){ queueSmem[wy][wx][i] = emptyPair; } } __device__ __forceinline__ void sort_( float &finalValue, int &finalIndex, float value, int index, int K ){ int tid = threadIdx.x; int wx; // int wx = tid % 32; // int wy = tid / 32; // #if _TPB_ == 32 // bitonic_sort_32(value, index, wx); switch (K){ case 1: bitonic_sort_global_1( finalValue, finalIndex, value, index); break; case 2: wx = tid % 2; bitonic_sort_2(value, index, wx); bitonic_sort_global_2( finalValue, finalIndex, value, index, wx); break; case 4: wx = tid % 4; bitonic_sort_4(value, index, wx); bitonic_sort_global_4( finalValue, finalIndex, value, index, wx); break; case 8: wx = tid % 8; bitonic_sort_8(value, index, wx); bitonic_sort_global_8( finalValue, finalIndex, value, index, wx); break; case 16: wx = tid % 16; bitonic_sort_16(value, index, wx); bitonic_sort_global_16( finalValue, finalIndex, value, index, wx); break; case 32: wx = tid % 32; bitonic_sort_32(value, index, wx); bitonic_sort_global_32( finalValue, finalIndex, value, index, wx); break; } } __device__ __forceinline__ void sort( float &finalValue, int &finalIndex, float value, int index, int K ){ int tid = threadIdx.x; int wx = tid % 32; int wy = tid / 32; // #if _TPB_ == 32 bitonic_sort_32(value, index, wx); switch (K){ case 1: bitonic_sort_global_1( finalValue, finalIndex, value, index); break; case 2: bitonic_sort_global_2( finalValue, finalIndex, value, index, wx); break; case 4: bitonic_sort_global_4( finalValue, finalIndex, value, index, wx); break; case 8: bitonic_sort_global_8( finalValue, finalIndex, value, index, wx); break; case 16: bitonic_sort_global_16( finalValue, finalIndex, value, index, wx); break; case 32: bitonic_sort_global_32( finalValue, finalIndex, value, index, wx); break; } } __device__ __forceinline__ void prefetch( const float* mat, const int i, const int iM, const int wx, const int N ){ int iN = ( i * 32 + wx ); if (likely(iN < N)){ const float* address = mat + (iM) * N+ iN; // asm("prefetch_batched.global.L1 [%0];" :: "l"(address) ); asm("prefetchu.L1 [%0];" :: "l"(address) ); } } __device__ __forceinline__ void prefetch_batched( const float* mat, const int i, const int iM, const int wx, const int N ){ #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ const float* address = mat + (iM) * N+ iN; // asm("prefetch_batched.global.L1 [%0];" :: "l"(address) ); asm("prefetchu.L1 [%0];" :: "l"(address) ); } } } __device__ __forceinline__ void prefetch_batched_fp16( const __half* mat, const int i, const int iM, const int wx, const int N ){ #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ const __half* address = mat + (iM) * N+ iN; // asm("prefetch_batched.global.L1 [%0];" :: "l"(address) ); asm("prefetchu.L1 [%0];" :: "l"(address) ); } } } __device__ __forceinline__ void load_buffer_batched( const float* mat, pair buffer[_TN_], const int i, const int iM, const int wx, const int N ){ const int tid = threadIdx.x; #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ buffer[j].index = iN; buffer[j].value = mat[ (iM) * N + iN ]; } else { buffer[j].value = -INFINITY; buffer[j].index = -1; } } } __device__ __forceinline__ void load_buffer_batched_fp16( const __half* mat, pair buffer[_TN_], const int i, const int iM, const int 
wx, const int N ){ const int tid = threadIdx.x; #pragma unroll for (int j=0; j<_TN_; j++){ int iN = ( i * _TN_ * 32 + j * 32 + wx ); if (likely(iN < N)){ buffer[j].index = iN; buffer[j].value = __half2float(mat[ (iM) * N + iN ]); } else { buffer[j].value = -INFINITY; buffer[j].index = -1; } } } __device__ __forceinline__ void arr2arr( pair src[_TN_], pair tar[_TN_] ){ #pragma unroll for (int i=0; i<_TN_; i++){ tar[i] = src[i]; } } extern "C" __global__ void top1_select( const float* __restrict__ mat, float* __restrict__ gValue, ll_t* __restrict__ gIndex, int M, int N, int K ){ const int tid = threadIdx.x; const int wx = tid % 32; const int wy = tid / 32; // const ll_t iM = blockIdx.x; const int mStart = blockIdx.x * N_WARPS; const int iM = mStart + wy; pair finalPair; finalPair.value = -INFINITY; finalPair.index = -1; pair working[_TN_]; prefetch_batched(mat, 0, iM, wx, N); const int nIter = (N + 32 * _TN_ - 1) / (32 * _TN_); for (int i=0; i < nIter; i++){ if (i + 1 < nIter){ prefetch_batched(mat, i + 1, iM, wx, N); } load_buffer_batched(mat, working, i, iM, wx, N); #pragma unroll for (int j=0; j < _TN_; j++){ pair newPair = working[j]; if (newPair.value > finalPair.value){ finalPair = newPair; } } } // sort( // finalPair.value, finalPair.index, // finalPair.value, finalPair.index, // K // ); bitonic_sort_32( finalPair.value, finalPair.index, wx ); // last K threads write their finalValue and finalIndex to gValue and gIndex if (32 - K <= wx){ const int writeAddress = (iM * K) + wx - (32 - K); gValue[writeAddress] = finalPair.value; gIndex[writeAddress] = ll_t(finalPair.index); } } extern "C" __global__ void top1_select_fp16( const __half* __restrict__ mat, __half* __restrict__ gValue, ll_t* __restrict__ gIndex, int M, int N, int K ){ const int tid = threadIdx.x; const int wx = tid % 32; const int wy = tid / 32; // const ll_t iM = blockIdx.x; const int mStart = blockIdx.x * N_WARPS; const int iM = mStart + wy; pair finalPair; finalPair.value = -INFINITY; finalPair.index = -1; pair working[_TN_]; prefetch_batched_fp16(mat, 0, iM, wx, N); const int nIter = (N + 32 * _TN_ - 1) / (32 * _TN_); for (int i=0; i < nIter; i++){ if (i + 1 < nIter){ prefetch_batched_fp16(mat, i + 1, iM, wx, N); } load_buffer_batched_fp16(mat, working, i, iM, wx, N); #pragma unroll for (int j=0; j < _TN_; j++){ pair newPair = working[j]; if (newPair.value > finalPair.value){ finalPair = newPair; } } } // sort( // finalPair.value, finalPair.index, // finalPair.value, finalPair.index, // K // ); bitonic_sort_32( finalPair.value, finalPair.index, wx ); // last K threads write their finalValue and finalIndex to gValue and gIndex if (32 - K <= wx){ const int writeAddress = (iM * K) + wx - (32 - K); gValue[writeAddress] = __float2half(finalPair.value); gIndex[writeAddress] = ll_t(finalPair.index); } }
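// ---------------------------------------------------------------------------
// Minimal host-side launch sketch for the top1_select kernel defined above
// (an illustrative assumption, not taken from the original file). It presumes
// the compile-time macros _TPB_ (threads per block, a multiple of 32), _TN_
// and _QCAP_ are defined at build time, that K is a power of two <= 32, and
// that M is a multiple of _TPB_/32, since the kernel does not bounds-check its
// row index. The helper name launch_top1_select is hypothetical and error
// checking is omitted; gValue and gIndex must each hold M*K elements.
void launch_top1_select(const float* d_mat, float* d_val, long long* d_idx,
                        int M, int N, int K, cudaStream_t stream) {
  const int warpsPerBlock = _TPB_ / 32;   // one warp scans one row of mat
  dim3 block(_TPB_);
  dim3 grid(M / warpsPerBlock);           // assumes M % warpsPerBlock == 0
  top1_select<<<grid, block, 0, stream>>>(d_mat, d_val, d_idx, M, N, K);
}
// ---------------------------------------------------------------------------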
06764852d378fb84f1e95910888f9c5296a593ad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>

int main()
{
}
06764852d378fb84f1e95910888f9c5296a593ad.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> int main() { }
5e832307202e11e2f95861785f5a42164d11f0e3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include "tensorAccessor.h" // A demo of packed tensor accessors in Pytorch __global__ void tensor_packed_accessor_kernel ( PackedTensorAccessor64<float, 1, RestrictPtrTraits> r, PackedTensorAccessor64<float, 2, RestrictPtrTraits> m, PackedTensorAccessor64<float, 1, RestrictPtrTraits> v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < r.size(0)) { float val = 0.0f; for (int64_t j = 0; j < m.size(1); j++) { val += m[i][j] * v[j]; } r[i] = val; } } __global__ void raw_accessor_kernel ( const int64_t nrow, const int64_t ncol, float *__restrict__ r, const float *__restrict__ m, const float *__restrict__ v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrow) { float val = 0.0f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r[i] = val; } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <number of rows> <number of columns> <repeat>\n", argv[0]); return 1; } const int64_t nrow = atol(argv[1]); const int64_t ncol = atol(argv[2]); const int repeat = atoi(argv[3]); // tensor sizes and strides const int64_t sizes[2] = {nrow, ncol}; const int64_t strides[2] = {ncol, 1}; int64_t numel = 1; for (int i = 0; i < 2; i++) numel *= sizes[i]; // matrix vector multiply int64_t m_bytes = numel * sizeof(float); int64_t v_bytes = ncol * sizeof(float); int64_t r_bytes = nrow * sizeof(float); float *m, *v, *r, *r_ref; m = (float*) malloc (m_bytes); v = (float*) malloc (v_bytes); r = (float*) malloc (r_bytes); r_ref = (float*) malloc (r_bytes); srand(123); for (int64_t i = 0; i < numel; i++) { m[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < ncol; i++) { v[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < nrow; i++) { float val = 0.f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r_ref[i] = val; } float *d_m, *d_v, *d_r; hipMalloc((void**)&d_m, m_bytes); hipMemcpy(d_m, m, m_bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_v, v_bytes); hipMemcpy(d_v, v, v_bytes, hipMemcpyHostToDevice); hipMalloc((void**)&d_r, r_bytes); PackedTensorAccessor64<float, 2, RestrictPtrTraits> m_acc (d_m, sizes, strides); PackedTensorAccessor64<float, 1, RestrictPtrTraits> v_acc (d_v, &ncol, strides+1); PackedTensorAccessor64<float, 1, RestrictPtrTraits> r_acc (d_r, &nrow, strides+1); dim3 grid ((nrow + 255) / 256); dim3 block (256); printf("Warmup..\n"); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( tensor_packed_accessor_kernel), dim3(grid), dim3(block), 0, 0, r_acc, m_acc, v_acc); hipLaunchKernelGGL(( raw_accessor_kernel), dim3(grid), dim3(block), 0, 0, nrow, ncol, d_r, d_m, d_v); } hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( raw_accessor_kernel), dim3(grid), dim3(block), 0, 0, nrow, ncol, d_r, d_m, d_v); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of raw_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( tensor_packed_accessor_kernel), dim3(grid), dim3(block), 0, 0, r_acc, m_acc, v_acc); } hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - 
start).count(); printf("Average execution time of tensor_packed_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); hipMemcpy(r, d_r, r_bytes, hipMemcpyDeviceToHost); hipFree(d_m); hipFree(d_v); hipFree(d_r); // verify (may fail due to floating-point rounding) bool ok = true; for (int64_t i = 0; i < nrow; i++) { if (fabsf(r[i] - r_ref[i]) > 1e-3f) { printf("%f %f\n", r[i], r_ref[i]); ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); free(m); free(v); free(r); free(r_ref); return 0; }
5e832307202e11e2f95861785f5a42164d11f0e3.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <cuda.h> #include "tensorAccessor.h" // A demo of packed tensor accessors in Pytorch __global__ void tensor_packed_accessor_kernel ( PackedTensorAccessor64<float, 1, RestrictPtrTraits> r, PackedTensorAccessor64<float, 2, RestrictPtrTraits> m, PackedTensorAccessor64<float, 1, RestrictPtrTraits> v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < r.size(0)) { float val = 0.0f; for (int64_t j = 0; j < m.size(1); j++) { val += m[i][j] * v[j]; } r[i] = val; } } __global__ void raw_accessor_kernel ( const int64_t nrow, const int64_t ncol, float *__restrict__ r, const float *__restrict__ m, const float *__restrict__ v) { int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nrow) { float val = 0.0f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r[i] = val; } } int main(int argc, char* argv[]) { if (argc != 4) { printf("Usage: %s <number of rows> <number of columns> <repeat>\n", argv[0]); return 1; } const int64_t nrow = atol(argv[1]); const int64_t ncol = atol(argv[2]); const int repeat = atoi(argv[3]); // tensor sizes and strides const int64_t sizes[2] = {nrow, ncol}; const int64_t strides[2] = {ncol, 1}; int64_t numel = 1; for (int i = 0; i < 2; i++) numel *= sizes[i]; // matrix vector multiply int64_t m_bytes = numel * sizeof(float); int64_t v_bytes = ncol * sizeof(float); int64_t r_bytes = nrow * sizeof(float); float *m, *v, *r, *r_ref; m = (float*) malloc (m_bytes); v = (float*) malloc (v_bytes); r = (float*) malloc (r_bytes); r_ref = (float*) malloc (r_bytes); srand(123); for (int64_t i = 0; i < numel; i++) { m[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < ncol; i++) { v[i] = rand() / (float)RAND_MAX; } for (int64_t i = 0; i < nrow; i++) { float val = 0.f; for (int64_t j = 0; j < ncol; j++) { val += m[i * ncol + j] * v[j]; } r_ref[i] = val; } float *d_m, *d_v, *d_r; cudaMalloc((void**)&d_m, m_bytes); cudaMemcpy(d_m, m, m_bytes, cudaMemcpyHostToDevice); cudaMalloc((void**)&d_v, v_bytes); cudaMemcpy(d_v, v, v_bytes, cudaMemcpyHostToDevice); cudaMalloc((void**)&d_r, r_bytes); PackedTensorAccessor64<float, 2, RestrictPtrTraits> m_acc (d_m, sizes, strides); PackedTensorAccessor64<float, 1, RestrictPtrTraits> v_acc (d_v, &ncol, strides+1); PackedTensorAccessor64<float, 1, RestrictPtrTraits> r_acc (d_r, &nrow, strides+1); dim3 grid ((nrow + 255) / 256); dim3 block (256); printf("Warmup..\n"); for (int i = 0; i < repeat; i++) { tensor_packed_accessor_kernel<<<grid, block>>>(r_acc, m_acc, v_acc); raw_accessor_kernel<<<grid, block>>>(nrow, ncol, d_r, d_m, d_v); } cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { raw_accessor_kernel<<<grid, block>>>(nrow, ncol, d_r, d_m, d_v); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of raw_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { tensor_packed_accessor_kernel<<<grid, block>>>(r_acc, m_acc, v_acc); } cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of tensor_packed_accessor_kernel: %f (us)\n", time * 1e-3f / repeat); cudaMemcpy(r, d_r, r_bytes, cudaMemcpyDeviceToHost); cudaFree(d_m); cudaFree(d_v); cudaFree(d_r); // 
verify (may fail due to floating-point rounding) bool ok = true; for (int64_t i = 0; i < nrow; i++) { if (fabsf(r[i] - r_ref[i]) > 1e-3f) { printf("%f %f\n", r[i], r_ref[i]); ok = false; break; } } printf("%s\n", ok ? "PASS" : "FAIL"); free(m); free(v); free(r); free(r_ref); return 0; }
a6a6243cbcff5cbcdc7a7a268c67698456653546.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <random>
#include "hip/hip_runtime_api.h"

double *InitializeArray(const int length, const int seed) {
  double *A = (double*)malloc(length * sizeof(double));
  std::default_random_engine e;
  std::uniform_real_distribution<double> rand(0, 10);
  e.seed(seed);
  for (int i = 0; i < length; ++i) {
    A[i] = rand(e);
  }
  return A;
}

void PrintArray(double *A, const int length, std::string str) {
  std::cout << "Array " << str << ":";
  for (int i = 0; i < length; ++i) {
    std::cout << " " << A[i];
  }
  std::cout << std::endl;
}

__host__ __device__ double MaxElement(double a, double b, double c) {
  if ((a >= b) && (a >= c)) {
    return a;
  }
  if (b >= c) {
    return b;
  }
  return c;
}

double *MaxElements(double *A, double *B, double *C, const int length) {
  double *D = (double*)malloc(length * sizeof(double));
  for (int i = 0; i < length; ++i) {
    D[i] = MaxElement(A[i], B[i], C[i]);
  }
  return D;
}

__global__ void MaxElementsKernel(double *A, double *B, double *C, double *D) {
  int i = threadIdx.x;
  D[i] = MaxElement(A[i], B[i], C[i]);
}

int main() {
  const int length = 10;
  const size_t size = length * sizeof(double);

  double *h_A, *h_B, *h_C, *h_D;
  h_A = (double*)malloc(size);
  h_B = (double*)malloc(size);
  h_C = (double*)malloc(size);
  h_D = (double*)malloc(size);
  h_A = InitializeArray(length, 0);
  h_B = InitializeArray(length, 5);
  h_C = InitializeArray(length, 10);

  PrintArray(h_A, length, "A");
  PrintArray(h_B, length, "B");
  PrintArray(h_C, length, "C");

  std::cout << "CPU Result:\n";
  PrintArray(MaxElements(h_A, h_B, h_C, length), length, "D");

  std::cout << "GPU Result:\n";
  const int dev = 0;
  hipSetDevice(dev);

  double *d_A, *d_B, *d_C, *d_D;
  hipMalloc((void **)&d_A, size);
  hipMalloc((void **)&d_B, size);
  hipMalloc((void **)&d_C, size);
  hipMalloc((void **)&d_D, size);

  hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
  hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
  hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);

  hipLaunchKernelGGL(( MaxElementsKernel), dim3(1), dim3(length), 0, 0, d_A, d_B, d_C, d_D);

  hipMemcpy(h_D, d_D, size, hipMemcpyDeviceToHost);
  PrintArray(h_D, length, "D");

  free(h_A);
  free(h_B);
  free(h_C);
  free(h_D);
  hipFree(d_A);
  hipFree(d_B);
  hipFree(d_C);
  hipFree(d_D);
  return 0;
}
a6a6243cbcff5cbcdc7a7a268c67698456653546.cu
#include <iostream>
#include <random>
#include "cuda_runtime_api.h"

double *InitializeArray(const int length, const int seed) {
  double *A = (double*)malloc(length * sizeof(double));
  std::default_random_engine e;
  std::uniform_real_distribution<double> rand(0, 10);
  e.seed(seed);
  for (int i = 0; i < length; ++i) {
    A[i] = rand(e);
  }
  return A;
}

void PrintArray(double *A, const int length, std::string str) {
  std::cout << "Array " << str << ":";
  for (int i = 0; i < length; ++i) {
    std::cout << " " << A[i];
  }
  std::cout << std::endl;
}

__host__ __device__ double MaxElement(double a, double b, double c) {
  if ((a >= b) && (a >= c)) {
    return a;
  }
  if (b >= c) {
    return b;
  }
  return c;
}

double *MaxElements(double *A, double *B, double *C, const int length) {
  double *D = (double*)malloc(length * sizeof(double));
  for (int i = 0; i < length; ++i) {
    D[i] = MaxElement(A[i], B[i], C[i]);
  }
  return D;
}

__global__ void MaxElementsKernel(double *A, double *B, double *C, double *D) {
  int i = threadIdx.x;
  D[i] = MaxElement(A[i], B[i], C[i]);
}

int main() {
  const int length = 10;
  const size_t size = length * sizeof(double);

  double *h_A, *h_B, *h_C, *h_D;
  h_A = (double*)malloc(size);
  h_B = (double*)malloc(size);
  h_C = (double*)malloc(size);
  h_D = (double*)malloc(size);
  h_A = InitializeArray(length, 0);
  h_B = InitializeArray(length, 5);
  h_C = InitializeArray(length, 10);

  PrintArray(h_A, length, "A");
  PrintArray(h_B, length, "B");
  PrintArray(h_C, length, "C");

  std::cout << "CPU Result:\n";
  PrintArray(MaxElements(h_A, h_B, h_C, length), length, "D");

  std::cout << "GPU Result:\n";
  const int dev = 0;
  cudaSetDevice(dev);

  double *d_A, *d_B, *d_C, *d_D;
  cudaMalloc((void **)&d_A, size);
  cudaMalloc((void **)&d_B, size);
  cudaMalloc((void **)&d_C, size);
  cudaMalloc((void **)&d_D, size);

  cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);

  MaxElementsKernel<<<1, length>>>(d_A, d_B, d_C, d_D);

  cudaMemcpy(h_D, d_D, size, cudaMemcpyDeviceToHost);
  PrintArray(h_D, length, "D");

  free(h_A);
  free(h_B);
  free(h_C);
  free(h_D);
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);
  cudaFree(d_D);
  return 0;
}
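// ---------------------------------------------------------------------------
// The demo above launches MaxElementsKernel with <<<1, length>>>, i.e. a single
// block of `length` threads, so it only works up to the per-block thread limit
// (1024 on current NVIDIA GPUs). A sketch of a bounds-checked multi-block
// variant for larger arrays; the name MaxElementsKernelN and the block size of
// 256 are illustrative assumptions, not part of the original demo:
__global__ void MaxElementsKernelN(const double *A, const double *B,
                                   const double *C, double *D, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;   // global element index
  if (i < n) {                                     // guard the tail block
    D[i] = MaxElement(A[i], B[i], C[i]);           // MaxElement is defined above
  }
}
// launch: MaxElementsKernelN<<<(n + 255) / 256, 256>>>(d_A, d_B, d_C, d_D, n);
// ---------------------------------------------------------------------------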
a0296a46103687e93d52f0500f0d242810d8916e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpuClusterTracks.h" #include "gpuFitVertices.h" #include "gpuSortByPt2.h" #include "gpuSplitVertices.h" namespace gpuVertexFinder { void Producer::allocateOnGPU() { cudaCheck(hipMalloc(&onGPU.ntrks, sizeof(uint32_t))); cudaCheck(hipMemset(onGPU.ntrks, 0, sizeof(uint32_t))); cudaCheck(hipMalloc(&onGPU.itrk, OnGPU::MAXTRACKS*sizeof(uint16_t))); cudaCheck(hipMalloc(&onGPU.zt, OnGPU::MAXTRACKS*sizeof(float))); cudaCheck(hipMalloc(&onGPU.ezt2, OnGPU::MAXTRACKS*sizeof(float))); cudaCheck(hipMalloc(&onGPU.ptt2, OnGPU::MAXTRACKS*sizeof(float))); cudaCheck(hipMalloc(&onGPU.iv, OnGPU::MAXTRACKS*sizeof(int32_t))); cudaCheck(hipMalloc(&onGPU.nvFinal, sizeof(uint32_t))); cudaCheck(hipMalloc(&onGPU.nvIntermediate, sizeof(uint32_t))); cudaCheck(hipMalloc(&onGPU.zv, OnGPU::MAXVTX*sizeof(float))); cudaCheck(hipMalloc(&onGPU.wv, OnGPU::MAXVTX*sizeof(float))); cudaCheck(hipMalloc(&onGPU.chi2, OnGPU::MAXVTX*sizeof(float))); cudaCheck(hipMalloc(&onGPU.ptv2, OnGPU::MAXVTX*sizeof(float))); cudaCheck(hipMalloc(&onGPU.sortInd, OnGPU::MAXVTX*sizeof(uint16_t))); cudaCheck(hipMalloc(&onGPU.izt, OnGPU::MAXTRACKS*sizeof(uint8_t))); cudaCheck(hipMalloc(&onGPU.nn, OnGPU::MAXTRACKS*sizeof(int32_t))); cudaCheck(hipMalloc(&onGPU_d,sizeof(OnGPU))); cudaCheck(hipMemcpy(onGPU_d,&onGPU,sizeof(OnGPU),hipMemcpyHostToDevice)); } void Producer::deallocateOnGPU() { cudaCheck(hipFree(onGPU.ntrks)); cudaCheck(hipFree(onGPU.itrk)); cudaCheck(hipFree(onGPU.zt)); cudaCheck(hipFree(onGPU.ezt2)); cudaCheck(hipFree(onGPU.ptt2)); cudaCheck(hipFree(onGPU.iv)); cudaCheck(hipFree(onGPU.nvFinal)); cudaCheck(hipFree(onGPU.nvIntermediate)); cudaCheck(hipFree(onGPU.zv)); cudaCheck(hipFree(onGPU.wv)); cudaCheck(hipFree(onGPU.chi2)); cudaCheck(hipFree(onGPU.ptv2)); cudaCheck(hipFree(onGPU.sortInd)); cudaCheck(hipFree(onGPU.izt)); cudaCheck(hipFree(onGPU.nn)); cudaCheck(hipFree(onGPU_d)); } __global__ void loadTracks(pixelTuplesHeterogeneousProduct::TuplesOnGPU const * tracks, OnGPU * pdata, float ptMin ){ auto const & tuples = *tracks->tuples_d; auto const * fit = tracks->helix_fit_results_d; auto const * quality = tracks->quality_d; auto idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx>= tuples.nbins()) return; if (tuples.size(idx)==0) { return; } if(quality[idx] != pixelTuplesHeterogeneousProduct::loose ) return; auto const & fittedTrack = fit[idx]; if (fittedTrack.par(2)<ptMin) return; auto & data = *pdata; auto it = atomicAdd(data.ntrks,1); data.itrk[it] = idx; data.zt[it] = fittedTrack.par(4); data.ezt2[it] = fittedTrack.cov(4, 4); data.ptt2[it] = fittedTrack.par(2)*fittedTrack.par(2); } void Producer::produce(hipStream_t stream, TuplesOnCPU const & tracks, float ptMin) { assert(onGPU_d);assert(tracks.gpu_d); cudaCheck(hipMemsetAsync(onGPU.ntrks, 0, sizeof(uint32_t),stream)); auto blockSize = 128; auto numberOfBlocks = (CAConstants::maxTuples() + blockSize - 1) / blockSize; hipLaunchKernelGGL(( loadTracks), dim3(numberOfBlocks),dim3(blockSize),0,stream, tracks.gpu_d,onGPU_d, ptMin); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( clusterTracks), dim3(1),dim3(1024-256),0,stream, onGPU_d,minT,eps,errmax,chi2max); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( fitVertices), dim3(1),dim3(1024-256),0,stream, onGPU_d,50.); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( splitVertices), dim3(1024),dim3(128),0,stream, onGPU_d,9.f); cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( fitVertices), dim3(1),dim3(1024-256),0,stream, onGPU_d,5000.); 
cudaCheck(hipGetLastError()); hipLaunchKernelGGL(( sortByPt2), dim3(1),dim3(256),0,stream, onGPU_d); cudaCheck(hipGetLastError()); if(enableTransfer) { cudaCheck(hipMemcpyAsync(&gpuProduct.nVertices, onGPU.nvFinal, sizeof(uint32_t), hipMemcpyDeviceToHost, stream)); cudaCheck(hipMemcpyAsync(&gpuProduct.nTracks, onGPU.ntrks, sizeof(uint32_t), hipMemcpyDeviceToHost, stream)); } } Producer::OnCPU const & Producer::fillResults(hipStream_t stream) { if(!enableTransfer) return gpuProduct; // finish copy gpuProduct.ivtx.resize(gpuProduct.nTracks); cudaCheck(hipMemcpyAsync(gpuProduct.ivtx.data(),onGPU.iv,sizeof(int32_t)*gpuProduct.nTracks, hipMemcpyDeviceToHost, stream)); gpuProduct.itrk.resize(gpuProduct.nTracks); cudaCheck(hipMemcpyAsync(gpuProduct.itrk.data(),onGPU.itrk,sizeof(int16_t)*gpuProduct.nTracks, hipMemcpyDeviceToHost, stream)); gpuProduct.z.resize(gpuProduct.nVertices); cudaCheck(hipMemcpyAsync(gpuProduct.z.data(),onGPU.zv,sizeof(float)*gpuProduct.nVertices, hipMemcpyDeviceToHost, stream)); gpuProduct.zerr.resize(gpuProduct.nVertices); cudaCheck(hipMemcpyAsync(gpuProduct.zerr.data(),onGPU.wv,sizeof(float)*gpuProduct.nVertices, hipMemcpyDeviceToHost, stream)); gpuProduct.chi2.resize(gpuProduct.nVertices); cudaCheck(hipMemcpyAsync(gpuProduct.chi2.data(),onGPU.chi2,sizeof(float)*gpuProduct.nVertices, hipMemcpyDeviceToHost, stream)); gpuProduct.sortInd.resize(gpuProduct.nVertices); cudaCheck(hipMemcpyAsync(gpuProduct.sortInd.data(),onGPU.sortInd,sizeof(uint16_t)*gpuProduct.nVertices, hipMemcpyDeviceToHost, stream)); hipStreamSynchronize(stream); return gpuProduct; } } // end namespace
a0296a46103687e93d52f0500f0d242810d8916e.cu
#include "gpuClusterTracks.h" #include "gpuFitVertices.h" #include "gpuSortByPt2.h" #include "gpuSplitVertices.h" namespace gpuVertexFinder { void Producer::allocateOnGPU() { cudaCheck(cudaMalloc(&onGPU.ntrks, sizeof(uint32_t))); cudaCheck(cudaMemset(onGPU.ntrks, 0, sizeof(uint32_t))); cudaCheck(cudaMalloc(&onGPU.itrk, OnGPU::MAXTRACKS*sizeof(uint16_t))); cudaCheck(cudaMalloc(&onGPU.zt, OnGPU::MAXTRACKS*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.ezt2, OnGPU::MAXTRACKS*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.ptt2, OnGPU::MAXTRACKS*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.iv, OnGPU::MAXTRACKS*sizeof(int32_t))); cudaCheck(cudaMalloc(&onGPU.nvFinal, sizeof(uint32_t))); cudaCheck(cudaMalloc(&onGPU.nvIntermediate, sizeof(uint32_t))); cudaCheck(cudaMalloc(&onGPU.zv, OnGPU::MAXVTX*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.wv, OnGPU::MAXVTX*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.chi2, OnGPU::MAXVTX*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.ptv2, OnGPU::MAXVTX*sizeof(float))); cudaCheck(cudaMalloc(&onGPU.sortInd, OnGPU::MAXVTX*sizeof(uint16_t))); cudaCheck(cudaMalloc(&onGPU.izt, OnGPU::MAXTRACKS*sizeof(uint8_t))); cudaCheck(cudaMalloc(&onGPU.nn, OnGPU::MAXTRACKS*sizeof(int32_t))); cudaCheck(cudaMalloc(&onGPU_d,sizeof(OnGPU))); cudaCheck(cudaMemcpy(onGPU_d,&onGPU,sizeof(OnGPU),cudaMemcpyHostToDevice)); } void Producer::deallocateOnGPU() { cudaCheck(cudaFree(onGPU.ntrks)); cudaCheck(cudaFree(onGPU.itrk)); cudaCheck(cudaFree(onGPU.zt)); cudaCheck(cudaFree(onGPU.ezt2)); cudaCheck(cudaFree(onGPU.ptt2)); cudaCheck(cudaFree(onGPU.iv)); cudaCheck(cudaFree(onGPU.nvFinal)); cudaCheck(cudaFree(onGPU.nvIntermediate)); cudaCheck(cudaFree(onGPU.zv)); cudaCheck(cudaFree(onGPU.wv)); cudaCheck(cudaFree(onGPU.chi2)); cudaCheck(cudaFree(onGPU.ptv2)); cudaCheck(cudaFree(onGPU.sortInd)); cudaCheck(cudaFree(onGPU.izt)); cudaCheck(cudaFree(onGPU.nn)); cudaCheck(cudaFree(onGPU_d)); } __global__ void loadTracks(pixelTuplesHeterogeneousProduct::TuplesOnGPU const * tracks, OnGPU * pdata, float ptMin ){ auto const & tuples = *tracks->tuples_d; auto const * fit = tracks->helix_fit_results_d; auto const * quality = tracks->quality_d; auto idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx>= tuples.nbins()) return; if (tuples.size(idx)==0) { return; } if(quality[idx] != pixelTuplesHeterogeneousProduct::loose ) return; auto const & fittedTrack = fit[idx]; if (fittedTrack.par(2)<ptMin) return; auto & data = *pdata; auto it = atomicAdd(data.ntrks,1); data.itrk[it] = idx; data.zt[it] = fittedTrack.par(4); data.ezt2[it] = fittedTrack.cov(4, 4); data.ptt2[it] = fittedTrack.par(2)*fittedTrack.par(2); } void Producer::produce(cudaStream_t stream, TuplesOnCPU const & tracks, float ptMin) { assert(onGPU_d);assert(tracks.gpu_d); cudaCheck(cudaMemsetAsync(onGPU.ntrks, 0, sizeof(uint32_t),stream)); auto blockSize = 128; auto numberOfBlocks = (CAConstants::maxTuples() + blockSize - 1) / blockSize; loadTracks<<<numberOfBlocks,blockSize,0,stream>>>(tracks.gpu_d,onGPU_d, ptMin); cudaCheck(cudaGetLastError()); clusterTracks<<<1,1024-256,0,stream>>>(onGPU_d,minT,eps,errmax,chi2max); cudaCheck(cudaGetLastError()); fitVertices<<<1,1024-256,0,stream>>>(onGPU_d,50.); cudaCheck(cudaGetLastError()); splitVertices<<<1024,128,0,stream>>>(onGPU_d,9.f); cudaCheck(cudaGetLastError()); fitVertices<<<1,1024-256,0,stream>>>(onGPU_d,5000.); cudaCheck(cudaGetLastError()); sortByPt2<<<1,256,0,stream>>>(onGPU_d); cudaCheck(cudaGetLastError()); if(enableTransfer) { cudaCheck(cudaMemcpyAsync(&gpuProduct.nVertices, onGPU.nvFinal, 
sizeof(uint32_t), cudaMemcpyDeviceToHost, stream)); cudaCheck(cudaMemcpyAsync(&gpuProduct.nTracks, onGPU.ntrks, sizeof(uint32_t), cudaMemcpyDeviceToHost, stream)); } } Producer::OnCPU const & Producer::fillResults(cudaStream_t stream) { if(!enableTransfer) return gpuProduct; // finish copy gpuProduct.ivtx.resize(gpuProduct.nTracks); cudaCheck(cudaMemcpyAsync(gpuProduct.ivtx.data(),onGPU.iv,sizeof(int32_t)*gpuProduct.nTracks, cudaMemcpyDeviceToHost, stream)); gpuProduct.itrk.resize(gpuProduct.nTracks); cudaCheck(cudaMemcpyAsync(gpuProduct.itrk.data(),onGPU.itrk,sizeof(int16_t)*gpuProduct.nTracks, cudaMemcpyDeviceToHost, stream)); gpuProduct.z.resize(gpuProduct.nVertices); cudaCheck(cudaMemcpyAsync(gpuProduct.z.data(),onGPU.zv,sizeof(float)*gpuProduct.nVertices, cudaMemcpyDeviceToHost, stream)); gpuProduct.zerr.resize(gpuProduct.nVertices); cudaCheck(cudaMemcpyAsync(gpuProduct.zerr.data(),onGPU.wv,sizeof(float)*gpuProduct.nVertices, cudaMemcpyDeviceToHost, stream)); gpuProduct.chi2.resize(gpuProduct.nVertices); cudaCheck(cudaMemcpyAsync(gpuProduct.chi2.data(),onGPU.chi2,sizeof(float)*gpuProduct.nVertices, cudaMemcpyDeviceToHost, stream)); gpuProduct.sortInd.resize(gpuProduct.nVertices); cudaCheck(cudaMemcpyAsync(gpuProduct.sortInd.data(),onGPU.sortInd,sizeof(uint16_t)*gpuProduct.nVertices, cudaMemcpyDeviceToHost, stream)); cudaStreamSynchronize(stream); return gpuProduct; } } // end namespace
3127646f897db55c62bf5141c3e405563459c23d.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2023 by XGBoost contributors */ #include <cstddef> // std::size_t #include <cstdint> // std::int32_t #include <vector> // std::vector #include "../common/linalg_op.h" // ElementWiseKernel,cbegin,cend #include "../common/quantile_loss_utils.h" // QuantileLossParam #include "../common/stats.h" // Quantile,WeightedQuantile #include "adaptive.h" // UpdateTreeLeaf #include "dmlc/parameter.h" // DMLC_DECLARE_PARAMETER #include "init_estimation.h" // CheckInitInputs #include "xgboost/base.h" // GradientPair,XGBOOST_DEVICE,bst_target_t #include "xgboost/data.h" // MetaInfo #include "xgboost/host_device_vector.h" // HostDeviceVector #include "xgboost/json.h" // Json,String,ToJson,FromJson #include "xgboost/linalg.h" // Tensor,MakeTensorView,MakeVec #include "xgboost/objective.h" // ObjFunction #include "xgboost/parameter.h" // XGBoostParameter #if defined(XGBOOST_USE_CUDA) #include "../common/linalg_op.cuh" // ElementWiseKernel #include "../common/stats.cuh" // SegmentedQuantile #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { class QuantileRegression : public ObjFunction { common::QuantileLossParam param_; HostDeviceVector<float> alpha_; bst_target_t Targets(MetaInfo const& info) const override { auto const& alpha = param_.quantile_alpha.Get(); CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured."; CHECK_EQ(info.labels.Shape(1), 1) << "Multi-target is not yet supported by the quantile loss."; CHECK(!alpha.empty()); // We have some placeholders for multi-target in the quantile loss. But it's not // supported as the gbtree doesn't know how to slice the gradient and there's no 3-dim // model shape in general. auto n_y = ::max(static_cast<std::size_t>(1), info.labels.Shape(1)); return alpha_.Size() * n_y; } public: void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info, std::int32_t iter, HostDeviceVector<GradientPair>* out_gpair) override { if (iter == 0) { CheckInitInputs(info); } CHECK_EQ(param_.quantile_alpha.Get().size(), alpha_.Size()); using SizeT = decltype(info.num_row_); SizeT n_targets = this->Targets(info); SizeT n_alphas = alpha_.Size(); CHECK_NE(n_alphas, 0); CHECK_GE(n_targets, n_alphas); CHECK_EQ(preds.Size(), info.num_row_ * n_targets); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(n_targets * info.num_row_); auto gpair = linalg::MakeTensorView(ctx_, out_gpair, info.num_row_, n_alphas, n_targets / n_alphas); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); auto n_samples = info.num_row_; alpha_.SetDevice(ctx_->gpu_id); auto alpha = ctx_->IsCPU() ? 
alpha_.ConstHostSpan() : alpha_.ConstDeviceSpan(); linalg::ElementWiseKernel( ctx_, gpair, [=] XGBOOST_DEVICE(std::size_t i, GradientPair const&) mutable { auto [sample_id, quantile_id, target_id] = linalg::UnravelIndex(i, n_samples, alpha.size(), n_targets / alpha.size()); auto d = predt(i) - labels(sample_id, target_id); auto h = weight[sample_id]; if (d >= 0) { auto g = (1.0f - alpha[quantile_id]) * weight[sample_id]; gpair(sample_id, quantile_id, target_id) = GradientPair{g, h}; } else { auto g = (-alpha[quantile_id] * weight[sample_id]); gpair(sample_id, quantile_id, target_id) = GradientPair{g, h}; } }); } void InitEstimation(MetaInfo const& info, linalg::Vector<float>* base_score) const override { CHECK(!alpha_.Empty()); auto n_targets = this->Targets(info); base_score->SetDevice(ctx_->gpu_id); base_score->Reshape(n_targets); double sw{0}; if (ctx_->IsCPU()) { auto quantiles = base_score->HostView(); auto h_weights = info.weights_.ConstHostVector(); if (info.weights_.Empty()) { sw = info.num_row_; } else { sw = std::accumulate(std::cbegin(h_weights), std::cend(h_weights), 0.0); } for (bst_target_t t{0}; t < n_targets; ++t) { auto alpha = param_.quantile_alpha[t]; auto h_labels = info.labels.HostView(); if (h_weights.empty()) { quantiles(t) = common::Quantile(ctx_, alpha, linalg::cbegin(h_labels), linalg::cend(h_labels)); } else { CHECK_EQ(h_weights.size(), h_labels.Size()); quantiles(t) = common::WeightedQuantile(ctx_, alpha, linalg::cbegin(h_labels), linalg::cend(h_labels), std::cbegin(h_weights)); } } } else { #if defined(XGBOOST_USE_CUDA) alpha_.SetDevice(ctx_->gpu_id); auto d_alpha = alpha_.ConstDeviceSpan(); auto d_labels = info.labels.View(ctx_->gpu_id); auto seg_it = dh::MakeTransformIterator<std::size_t>( thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { return i * d_labels.Shape(0); }); CHECK_EQ(d_labels.Shape(1), 1); auto val_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { auto sample_idx = i % d_labels.Shape(0); return d_labels(sample_idx, 0); }); auto n = d_labels.Size() * d_alpha.size(); CHECK_EQ(base_score->Size(), d_alpha.size()); if (info.weights_.Empty()) { common::SegmentedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1, val_it, val_it + n, base_score->Data()); sw = info.num_row_; } else { info.weights_.SetDevice(ctx_->gpu_id); auto d_weights = info.weights_.ConstDeviceSpan(); auto weight_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { auto sample_idx = i % d_labels.Shape(0); return d_weights[sample_idx]; }); common::SegmentedWeightedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1, val_it, val_it + n, weight_it, weight_it + n, base_score->Data()); sw = dh::Reduce(ctx_->CUDACtx()->CTP(), dh::tcbegin(d_weights), dh::tcend(d_weights), 0.0, thrust::plus<double>{}); } #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } // For multiple quantiles, we should extend the base score to a vector instead of // computing the average. For now, this is a workaround. 
linalg::Vector<float> temp; common::Mean(ctx_, *base_score, &temp); double meanq = temp(0) * sw; collective::Allreduce<collective::Operation::kSum>(&meanq, 1); collective::Allreduce<collective::Operation::kSum>(&sw, 1); meanq /= (sw + kRtEps); base_score->Reshape(1); base_score->Data()->Fill(meanq); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, float learning_rate, HostDeviceVector<float> const& prediction, std::int32_t group_idx, RegTree* p_tree) const override { auto alpha = param_.quantile_alpha[group_idx]; ::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, learning_rate, prediction, alpha, p_tree); } void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); param_.Validate(); this->alpha_.HostVector() = param_.quantile_alpha.Get(); } ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } static char const* Name() { return "reg:quantileerror"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Name()); out["quantile_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { CHECK_EQ(get<String const>(in["name"]), Name()); FromJson(in["quantile_loss_param"], &param_); alpha_.HostVector() = param_.quantile_alpha.Get(); } const char* DefaultEvalMetric() const override { return "quantile"; } Json DefaultMetricConfig() const override { CHECK(param_.GetInitialised()); Json config{Object{}}; config["name"] = String{this->DefaultEvalMetric()}; config["quantile_loss_param"] = ToJson(param_); return config; } }; XGBOOST_REGISTER_OBJECTIVE(QuantileRegression, QuantileRegression::Name()) .describe("Regression with quantile loss.") .set_body([]() { return new QuantileRegression(); }); #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(quantile_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) } // namespace obj } // namespace xgboost
3127646f897db55c62bf5141c3e405563459c23d.cu
/** * Copyright 2023 by XGBoost contributors */ #include <cstddef> // std::size_t #include <cstdint> // std::int32_t #include <vector> // std::vector #include "../common/linalg_op.h" // ElementWiseKernel,cbegin,cend #include "../common/quantile_loss_utils.h" // QuantileLossParam #include "../common/stats.h" // Quantile,WeightedQuantile #include "adaptive.h" // UpdateTreeLeaf #include "dmlc/parameter.h" // DMLC_DECLARE_PARAMETER #include "init_estimation.h" // CheckInitInputs #include "xgboost/base.h" // GradientPair,XGBOOST_DEVICE,bst_target_t #include "xgboost/data.h" // MetaInfo #include "xgboost/host_device_vector.h" // HostDeviceVector #include "xgboost/json.h" // Json,String,ToJson,FromJson #include "xgboost/linalg.h" // Tensor,MakeTensorView,MakeVec #include "xgboost/objective.h" // ObjFunction #include "xgboost/parameter.h" // XGBoostParameter #if defined(XGBOOST_USE_CUDA) #include "../common/linalg_op.cuh" // ElementWiseKernel #include "../common/stats.cuh" // SegmentedQuantile #endif // defined(XGBOOST_USE_CUDA) namespace xgboost { namespace obj { class QuantileRegression : public ObjFunction { common::QuantileLossParam param_; HostDeviceVector<float> alpha_; bst_target_t Targets(MetaInfo const& info) const override { auto const& alpha = param_.quantile_alpha.Get(); CHECK_EQ(alpha.size(), alpha_.Size()) << "The objective is not yet configured."; CHECK_EQ(info.labels.Shape(1), 1) << "Multi-target is not yet supported by the quantile loss."; CHECK(!alpha.empty()); // We have some placeholders for multi-target in the quantile loss. But it's not // supported as the gbtree doesn't know how to slice the gradient and there's no 3-dim // model shape in general. auto n_y = std::max(static_cast<std::size_t>(1), info.labels.Shape(1)); return alpha_.Size() * n_y; } public: void GetGradient(HostDeviceVector<float> const& preds, const MetaInfo& info, std::int32_t iter, HostDeviceVector<GradientPair>* out_gpair) override { if (iter == 0) { CheckInitInputs(info); } CHECK_EQ(param_.quantile_alpha.Get().size(), alpha_.Size()); using SizeT = decltype(info.num_row_); SizeT n_targets = this->Targets(info); SizeT n_alphas = alpha_.Size(); CHECK_NE(n_alphas, 0); CHECK_GE(n_targets, n_alphas); CHECK_EQ(preds.Size(), info.num_row_ * n_targets); auto labels = info.labels.View(ctx_->gpu_id); out_gpair->SetDevice(ctx_->gpu_id); out_gpair->Resize(n_targets * info.num_row_); auto gpair = linalg::MakeTensorView(ctx_, out_gpair, info.num_row_, n_alphas, n_targets / n_alphas); info.weights_.SetDevice(ctx_->gpu_id); common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan() : info.weights_.ConstDeviceSpan()}; preds.SetDevice(ctx_->gpu_id); auto predt = linalg::MakeVec(&preds); auto n_samples = info.num_row_; alpha_.SetDevice(ctx_->gpu_id); auto alpha = ctx_->IsCPU() ? 
alpha_.ConstHostSpan() : alpha_.ConstDeviceSpan(); linalg::ElementWiseKernel( ctx_, gpair, [=] XGBOOST_DEVICE(std::size_t i, GradientPair const&) mutable { auto [sample_id, quantile_id, target_id] = linalg::UnravelIndex(i, n_samples, alpha.size(), n_targets / alpha.size()); auto d = predt(i) - labels(sample_id, target_id); auto h = weight[sample_id]; if (d >= 0) { auto g = (1.0f - alpha[quantile_id]) * weight[sample_id]; gpair(sample_id, quantile_id, target_id) = GradientPair{g, h}; } else { auto g = (-alpha[quantile_id] * weight[sample_id]); gpair(sample_id, quantile_id, target_id) = GradientPair{g, h}; } }); } void InitEstimation(MetaInfo const& info, linalg::Vector<float>* base_score) const override { CHECK(!alpha_.Empty()); auto n_targets = this->Targets(info); base_score->SetDevice(ctx_->gpu_id); base_score->Reshape(n_targets); double sw{0}; if (ctx_->IsCPU()) { auto quantiles = base_score->HostView(); auto h_weights = info.weights_.ConstHostVector(); if (info.weights_.Empty()) { sw = info.num_row_; } else { sw = std::accumulate(std::cbegin(h_weights), std::cend(h_weights), 0.0); } for (bst_target_t t{0}; t < n_targets; ++t) { auto alpha = param_.quantile_alpha[t]; auto h_labels = info.labels.HostView(); if (h_weights.empty()) { quantiles(t) = common::Quantile(ctx_, alpha, linalg::cbegin(h_labels), linalg::cend(h_labels)); } else { CHECK_EQ(h_weights.size(), h_labels.Size()); quantiles(t) = common::WeightedQuantile(ctx_, alpha, linalg::cbegin(h_labels), linalg::cend(h_labels), std::cbegin(h_weights)); } } } else { #if defined(XGBOOST_USE_CUDA) alpha_.SetDevice(ctx_->gpu_id); auto d_alpha = alpha_.ConstDeviceSpan(); auto d_labels = info.labels.View(ctx_->gpu_id); auto seg_it = dh::MakeTransformIterator<std::size_t>( thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { return i * d_labels.Shape(0); }); CHECK_EQ(d_labels.Shape(1), 1); auto val_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { auto sample_idx = i % d_labels.Shape(0); return d_labels(sample_idx, 0); }); auto n = d_labels.Size() * d_alpha.size(); CHECK_EQ(base_score->Size(), d_alpha.size()); if (info.weights_.Empty()) { common::SegmentedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1, val_it, val_it + n, base_score->Data()); sw = info.num_row_; } else { info.weights_.SetDevice(ctx_->gpu_id); auto d_weights = info.weights_.ConstDeviceSpan(); auto weight_it = dh::MakeTransformIterator<float>(thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(std::size_t i) { auto sample_idx = i % d_labels.Shape(0); return d_weights[sample_idx]; }); common::SegmentedWeightedQuantile(ctx_, d_alpha.data(), seg_it, seg_it + d_alpha.size() + 1, val_it, val_it + n, weight_it, weight_it + n, base_score->Data()); sw = dh::Reduce(ctx_->CUDACtx()->CTP(), dh::tcbegin(d_weights), dh::tcend(d_weights), 0.0, thrust::plus<double>{}); } #else common::AssertGPUSupport(); #endif // defined(XGBOOST_USE_CUDA) } // For multiple quantiles, we should extend the base score to a vector instead of // computing the average. For now, this is a workaround. 
linalg::Vector<float> temp; common::Mean(ctx_, *base_score, &temp); double meanq = temp(0) * sw; collective::Allreduce<collective::Operation::kSum>(&meanq, 1); collective::Allreduce<collective::Operation::kSum>(&sw, 1); meanq /= (sw + kRtEps); base_score->Reshape(1); base_score->Data()->Fill(meanq); } void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info, float learning_rate, HostDeviceVector<float> const& prediction, std::int32_t group_idx, RegTree* p_tree) const override { auto alpha = param_.quantile_alpha[group_idx]; ::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, learning_rate, prediction, alpha, p_tree); } void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); param_.Validate(); this->alpha_.HostVector() = param_.quantile_alpha.Get(); } ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; } static char const* Name() { return "reg:quantileerror"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Name()); out["quantile_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { CHECK_EQ(get<String const>(in["name"]), Name()); FromJson(in["quantile_loss_param"], &param_); alpha_.HostVector() = param_.quantile_alpha.Get(); } const char* DefaultEvalMetric() const override { return "quantile"; } Json DefaultMetricConfig() const override { CHECK(param_.GetInitialised()); Json config{Object{}}; config["name"] = String{this->DefaultEvalMetric()}; config["quantile_loss_param"] = ToJson(param_); return config; } }; XGBOOST_REGISTER_OBJECTIVE(QuantileRegression, QuantileRegression::Name()) .describe("Regression with quantile loss.") .set_body([]() { return new QuantileRegression(); }); #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(quantile_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) } // namespace obj } // namespace xgboost
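// For reference, the branch in QuantileRegression::GetGradient above is the standard
// pinball (quantile) loss derivative. A minimal CPU-side sketch of that per-element
// rule follows; the helper name pinball_grad is illustrative only, not XGBoost API.
#include <utility>

// d = predt - label; grad = (1 - alpha) * w if d >= 0, else -alpha * w; hess = w.
inline std::pair<float, float> pinball_grad(float predt, float label, float alpha, float w) {
  const float d = predt - label;
  const float g = (d >= 0.0f) ? (1.0f - alpha) * w : -alpha * w;
  return {g, w};
}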
505cfdb0db1a4a56c5305e4bd7edb33682f64289.hip
// !!! This is a file automatically generated by hipify!!!
// moveArrays.cu
//
// demonstrates CUDA interface to data allocation on device (GPU)
// and data movement between host (CPU) and device.
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>

int main(void)
{
    float *a_h, *b_h; // pointers to host memory
    float *a_d, *b_d; // pointers to device memory
    int N = 14;
    int i;
    // allocate arrays on host
    a_h = (float *)malloc(sizeof(float)*N);
    b_h = (float *)malloc(sizeof(float)*N);
    // allocate arrays on device
    hipMalloc((void **) &a_d, sizeof(float)*N);
    hipMalloc((void **) &b_d, sizeof(float)*N);
    // initialize host data
    for (i=0; i<N; i++) {
        a_h[i] = 10.f+i;
        b_h[i] = 0.f;
    }
    // send data from host to device: a_h to a_d
    hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice);
    // copy data within device: a_d to b_d
    hipMemcpy(b_d, a_d, sizeof(float)*N, hipMemcpyDeviceToDevice);
    // retrieve data from device: b_d to b_h
    hipMemcpy(b_h, b_d, sizeof(float)*N, hipMemcpyDeviceToHost);
    // check result
    for (i=0; i<N; i++)
        assert(a_h[i] == b_h[i]);
    // cleanup
    free(a_h);
    free(b_h);
    hipFree(a_d);
    hipFree(b_d);
    return 0;
}
505cfdb0db1a4a56c5305e4bd7edb33682f64289.cu
// moveArrays.cu
//
// demonstrates CUDA interface to data allocation on device (GPU)
// and data movement between host (CPU) and device.
#include <stdio.h>
#include <assert.h>
#include <cuda.h>

int main(void)
{
    float *a_h, *b_h; // pointers to host memory
    float *a_d, *b_d; // pointers to device memory
    int N = 14;
    int i;
    // allocate arrays on host
    a_h = (float *)malloc(sizeof(float)*N);
    b_h = (float *)malloc(sizeof(float)*N);
    // allocate arrays on device
    cudaMalloc((void **) &a_d, sizeof(float)*N);
    cudaMalloc((void **) &b_d, sizeof(float)*N);
    // initialize host data
    for (i=0; i<N; i++) {
        a_h[i] = 10.f+i;
        b_h[i] = 0.f;
    }
    // send data from host to device: a_h to a_d
    cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
    // copy data within device: a_d to b_d
    cudaMemcpy(b_d, a_d, sizeof(float)*N, cudaMemcpyDeviceToDevice);
    // retrieve data from device: b_d to b_h
    cudaMemcpy(b_h, b_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    // check result
    for (i=0; i<N; i++)
        assert(a_h[i] == b_h[i]);
    // cleanup
    free(a_h);
    free(b_h);
    cudaFree(a_d);
    cudaFree(b_d);
    return 0;
}
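// For reference: the pair above checks nothing returned by the CUDA runtime. A minimal
// sketch of the usual hardening, assuming an illustrative CUDA_CHECK macro that is not
// part of moveArrays.cu:
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                             \
              cudaGetErrorString(err_), __FILE__, __LINE__);                  \
      exit(EXIT_FAILURE);                                                     \
    }                                                                         \
  } while (0)

// Usage: wrap each runtime call, e.g.
//   CUDA_CHECK(cudaMalloc((void **)&a_d, sizeof(float)*N));
//   CUDA_CHECK(cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice));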
8bd544d503f8bea4adfc0bbd23cc08ca7b2ce0ea.hip
// !!! This is a file automatically generated by hipify!!! #include "LBvhKernels.cuh" #include <hip/hip_runtime.h> #include "utility\CudaDeviceUtils.h" #include "BvhExtNode.h" #include "BvhIntNode.h" namespace mn { __global__ void calibrateLeafRangeMarks(int size, BvhIntNodeCompletePort _tks, const int* _leafRestrRoots, const int* _intRestrMarks, int* _leafRangeMarks) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; int subrt = _leafRestrRoots[idx]; if (subrt != INT_MAX && _intRestrMarks[subrt] == 1) { ///< second condition ensures picking the largest covering subtree atomicAdd(_leafRangeMarks + idx, 1); atomicAdd(_leafRangeMarks + _tks.getrangey(subrt) + 1, -1); } } __global__ void calibrateRestrRoots(int size, BvhIntNodeCompletePort _tks, const int* _leafRestrRoots, const int* _intRestrMarks, int* _leafRestrRootMarks, int* _numSubtree, uint* _subtreeSizes, int* _subtrees, int* _numRtIntNode) { /// count number of affected int nodes int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; int root = _leafRestrRoots[idx]; if (root != INT_MAX && _intRestrMarks[root] == 1) { //printf("\tadding %d-th [%d, %d] int node need restructure\n", root, _tks.rangex(root), _tks.rangey(root)); int2 range{ _tks.getrangex(root), _tks.getrangey(root) }; atomicAdd(_leafRestrRootMarks + idx, root); atomicAdd(_leafRestrRootMarks + range.y + 1, -root); atomicAdd(_numRtIntNode, range.y - range.x); int id = atomicAdd(_numSubtree, 1); _subtrees[id] = root; _subtreeSizes[id] = range.y - range.x + 1; } } __global__ void calcRestrMCs(int size, const int3* _faces, const PointType* _vertices, BOX scene, const int* _primRestrMarks, const int* _primmap, uint* codes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; //for (; idx < size; idx += gridDim.x * blockDim.x) { int pid = _primmap[idx]; if (_primRestrMarks[pid]) { BOX bv{}; auto v = _vertices[_faces[idx].x]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].y]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].z]; bv.combines(v.x, v.y, v.z); const auto c = bv.center(); const auto offset = c - scene._min; codes[pid] = morton3D(offset.x / scene.width(), offset.y / scene.height(), offset.z / scene.depth()); } //} } __global__ void selectPrimitives(int primsize, const int* _leafRestrRoots, const int* _gatherMap, const MCSize* _mtcodes, uint64* _finalKeys, int* _scatterMap) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= primsize) return; if (_leafRestrRoots[idx] > 0) { int gatherPos = _gatherMap[idx]; /// idx -> cptPos _finalKeys[gatherPos] = ((uint64)_leafRestrRoots[idx] << 32) | _mtcodes[idx]; _scatterMap[gatherPos] = idx; ///< going to be sorted soon } } __global__ void updatePrimMap(int restrPrimNum, int* _primIds, int* _newPrimIds, int* _primToIdx, int* _primmap) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= restrPrimNum) return; int val; _primmap[val = _primToIdx[_primIds[idx]]] = _newPrimIds[idx]; _primToIdx[_newPrimIds[idx]] = val; //_primmap[_primToIdx[_primIds[idx]]] = _primIds[idx]; //_primmap[_primIds[idx]] = _primIds[idx]; //_primToIdx[_primIds[idx]] = _primIds[idx]; } __global__ void updatePrimAndExtNode(int primsize, const int *_primRestrMarks, const int * _primMap, const int3* _faces, const PointType * _vertices, const BOX * scene, BvhExtNodeCompletePort _lvs) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= primsize) return; auto &_prims = _lvs.refPrimPort(); //auto _prims = _lvs.getPrimPort(); int pid = _primMap[idx]; BOX bv{}; 
auto v = _vertices[_faces[idx].x]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].y]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].z]; bv.combines(v.x, v.y, v.z); // primitive layer _prims.setBV(pid, bv); // ext node layer //int extId = _prims.extno(idx) - 1; //const auto &primBvs = _lvs.primBvs(); //auto &extBvs = _lvs.refExtBvs(); // issues _lvs.setBV(pid, bv); //atomicMinD(&extBvs.minx(extId), primBvs.getminx(idx)); //atomicMinD(&extBvs.miny(extId), primBvs.getminy(idx)); //atomicMinD(&extBvs.minz(extId), primBvs.getminz(idx)); //atomicMaxD(&extBvs.maxx(extId), primBvs.getmaxx(idx)); //atomicMaxD(&extBvs.maxy(extId), primBvs.getmaxy(idx)); //atomicMaxD(&extBvs.maxz(extId), primBvs.getmaxz(idx)); // restr primitive if (_primRestrMarks[pid]) { _prims.vida(pid) = _faces[idx].x; _prims.vidb(pid) = _faces[idx].y; _prims.vidc(pid) = _faces[idx].z; //_prims.idx(pid) = idx; //_prims.type(pid) = static_cast<uchar>(ModelType::FixedDeformableType); const auto c = bv.center(); const auto offset = c - scene->_min; _prims.mtcode(pid) = morton3D(offset.x / scene->width(), offset.y / scene->height(), offset.z / scene->depth()); } } __global__ void restrIntNodes(int extSize, int numRtExtNode, const int *_restrExtNodes, const uint *_prevTkMarks, const int *_leafRestrRoots, uint *_depths, int *_localLcas, BvhExtNodeCompletePort _lvs, BvhIntNodeCompletePort _tks) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numRtExtNode) return; idx = _restrExtNodes[idx]; _localLcas[idx] = -1; //, _lvs.rcl(idx) = 0; const int subtreeRoot = _leafRestrRoots[idx]; if (subtreeRoot == 0) return; ///< actually not necessary, skipped from the beginning int l = idx - 1, r = idx; ///< (l, r] bool mark; char bdmark = (l < 0 || _leafRestrRoots[l] != subtreeRoot) << 1 | (r == extSize - 1 || _leafRestrRoots[r + 1] != subtreeRoot); if (bdmark) mark = bdmark & 1; else mark = _lvs.getmetric(l) < _lvs.getmetric(r); ///< true when right child, false otherwise int cur = mark ? l : r; _lvs.par(idx) = cur; if (mark) _tks.rc(cur) = idx, _tks.rangey(cur) = idx, atomicOr(&_tks.mark(cur), 0x00000002) , _lvs.mark(idx) = 0x00000007; else _tks.lc(cur) = idx, _tks.rangex(cur) = idx, atomicOr(&_tks.mark(cur), 0x00000001) , _lvs.mark(idx) = 0x00000003; while (atomicAdd(&_tks.flag(cur), 1) == 1) { _tks.refit(cur, _lvs); _tks.mark(cur) &= 0x00000007; l = _tks.rangex(cur) - 1, r = _tks.rangey(cur); _localLcas[l + 1] = cur, _depths[l + 1]++; bdmark = (l < 0 || _leafRestrRoots[l] != subtreeRoot) << 1 | (r == extSize - 1 || _leafRestrRoots[r + 1] != subtreeRoot); if (bdmark) mark = bdmark & 1; else mark = _lvs.getmetric(l) < _lvs.getmetric(r); if (bdmark == 3) { /// relationship with father if (_prevTkMarks[subtreeRoot] & 4) _tks.mark(cur) |= 0x00000004; else _tks.mark(cur) &= 0xFFFFFFFB; _tks.par(cur) = -1; ///< sentinel mark, no need modify mapping break; } int par = mark ? 
l : r; _tks.par(cur) = par; if (mark) _tks.rc(par) = cur, _tks.rangey(par) = r, atomicAnd(&_tks.mark(par), 0xFFFFFFFD), _tks.mark(cur) |= 0x00000004; else _tks.lc(par) = cur, _tks.rangex(par) = l + 1, atomicAnd(&_tks.mark(par), 0xFFFFFFFE), _tks.mark(cur) &= 0xFFFFFFFB; cur = par; } } __global__ void calcRestrIntNodeOrders(int numRtExtNode, const int *_restrExtNodes, const uint *_depths, const uint *_offsets, const int *_subtreeRoots, const int* _prevLbds, const uint *_prevTkMarks, const int* _localLcas, int *_lcas, int *_pars, BvhIntNodeCompletePort _unorderedTks, int *_tkMap, int *_sequence) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numRtExtNode) return; idx = _restrExtNodes[idx]; ///< leaf (ext node) index int node = _localLcas[idx]; ///< index of the unsorted trunk if (node != -1) { int depth = _subtreeRoots[idx], pos = _offsets[idx]; ///< first work as the root of the restr subtree /// for now, the rangex of the original subtree root is that of the newly built _unorderedTks int newId = depth + pos - _offsets[_prevLbds[depth]]; //if (_lcas[idx] != depth << 1) //if (!(_prevTkMarks[depth] & 4 == 0 && _prevLbds[_lcas[idx] >> 1] == _prevLbds[newId])) if (_prevTkMarks[depth] & 4 || idx != _prevLbds[depth]) _lcas[idx] = newId << 1; for (depth = _depths[idx]; depth--; node = _unorderedTks.getlc(node)) { if (_unorderedTks.getmark(node) & 2) { _pars[_unorderedTks.getrc(node)] = newId; } _tkMap[node] = newId++; _sequence[pos++] = node; } _pars[idx] = newId - 1; } else { _lcas[idx] = idx << 1 | 1; } } __global__ void reorderRestrIntNodes(int numRtIntNode, const int *_restrIntNodes, const int *_tkMap, BvhIntNodeCompletePort _unorderedTks, BvhIntNodeCompletePort _tks) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numRtIntNode) return; idx = _restrIntNodes[idx]; int newId = _tkMap[idx]; uint mark = _unorderedTks.getmark(idx); _tks.mark(newId) = mark; _tks.lc(newId) = mark & 1 ? _unorderedTks.getlc(idx) : _tkMap[_unorderedTks.getlc(idx)]; _tks.rc(newId) = mark & 2 ? _unorderedTks.getrc(idx) : _tkMap[_unorderedTks.getrc(idx)]; if ((mark = _unorderedTks.getpar(idx)) != -1) _tks.par(newId) = _tkMap[mark]; _tks.rangex(newId) = _unorderedTks.getrangex(idx); _tks.rangey(newId) = _unorderedTks.getrangey(idx); //_tks.rcd(newId) = _rcls[mark] - _unorderedTks.getrcd(idx); _tks.setBV(newId, _unorderedTks, idx); } }
8bd544d503f8bea4adfc0bbd23cc08ca7b2ce0ea.cu
#include "LBvhKernels.cuh" #include <cuda_runtime.h> #include "utility\CudaDeviceUtils.h" #include "BvhExtNode.h" #include "BvhIntNode.h" namespace mn { __global__ void calibrateLeafRangeMarks(int size, BvhIntNodeCompletePort _tks, const int* _leafRestrRoots, const int* _intRestrMarks, int* _leafRangeMarks) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; int subrt = _leafRestrRoots[idx]; if (subrt != INT_MAX && _intRestrMarks[subrt] == 1) { ///< second condition ensures picking the largest covering subtree atomicAdd(_leafRangeMarks + idx, 1); atomicAdd(_leafRangeMarks + _tks.getrangey(subrt) + 1, -1); } } __global__ void calibrateRestrRoots(int size, BvhIntNodeCompletePort _tks, const int* _leafRestrRoots, const int* _intRestrMarks, int* _leafRestrRootMarks, int* _numSubtree, uint* _subtreeSizes, int* _subtrees, int* _numRtIntNode) { /// count number of affected int nodes int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; int root = _leafRestrRoots[idx]; if (root != INT_MAX && _intRestrMarks[root] == 1) { //printf("\tadding %d-th [%d, %d] int node need restructure\n", root, _tks.rangex(root), _tks.rangey(root)); int2 range{ _tks.getrangex(root), _tks.getrangey(root) }; atomicAdd(_leafRestrRootMarks + idx, root); atomicAdd(_leafRestrRootMarks + range.y + 1, -root); atomicAdd(_numRtIntNode, range.y - range.x); int id = atomicAdd(_numSubtree, 1); _subtrees[id] = root; _subtreeSizes[id] = range.y - range.x + 1; } } __global__ void calcRestrMCs(int size, const int3* _faces, const PointType* _vertices, BOX scene, const int* _primRestrMarks, const int* _primmap, uint* codes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; //for (; idx < size; idx += gridDim.x * blockDim.x) { int pid = _primmap[idx]; if (_primRestrMarks[pid]) { BOX bv{}; auto v = _vertices[_faces[idx].x]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].y]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].z]; bv.combines(v.x, v.y, v.z); const auto c = bv.center(); const auto offset = c - scene._min; codes[pid] = morton3D(offset.x / scene.width(), offset.y / scene.height(), offset.z / scene.depth()); } //} } __global__ void selectPrimitives(int primsize, const int* _leafRestrRoots, const int* _gatherMap, const MCSize* _mtcodes, uint64* _finalKeys, int* _scatterMap) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= primsize) return; if (_leafRestrRoots[idx] > 0) { int gatherPos = _gatherMap[idx]; /// idx -> cptPos _finalKeys[gatherPos] = ((uint64)_leafRestrRoots[idx] << 32) | _mtcodes[idx]; _scatterMap[gatherPos] = idx; ///< going to be sorted soon } } __global__ void updatePrimMap(int restrPrimNum, int* _primIds, int* _newPrimIds, int* _primToIdx, int* _primmap) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= restrPrimNum) return; int val; _primmap[val = _primToIdx[_primIds[idx]]] = _newPrimIds[idx]; _primToIdx[_newPrimIds[idx]] = val; //_primmap[_primToIdx[_primIds[idx]]] = _primIds[idx]; //_primmap[_primIds[idx]] = _primIds[idx]; //_primToIdx[_primIds[idx]] = _primIds[idx]; } __global__ void updatePrimAndExtNode(int primsize, const int *_primRestrMarks, const int * _primMap, const int3* _faces, const PointType * _vertices, const BOX * scene, BvhExtNodeCompletePort _lvs) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= primsize) return; auto &_prims = _lvs.refPrimPort(); //auto _prims = _lvs.getPrimPort(); int pid = _primMap[idx]; BOX bv{}; auto v = _vertices[_faces[idx].x]; bv.combines(v.x, v.y, v.z); v 
= _vertices[_faces[idx].y]; bv.combines(v.x, v.y, v.z); v = _vertices[_faces[idx].z]; bv.combines(v.x, v.y, v.z); // primitive layer _prims.setBV(pid, bv); // ext node layer //int extId = _prims.extno(idx) - 1; //const auto &primBvs = _lvs.primBvs(); //auto &extBvs = _lvs.refExtBvs(); // issues _lvs.setBV(pid, bv); //atomicMinD(&extBvs.minx(extId), primBvs.getminx(idx)); //atomicMinD(&extBvs.miny(extId), primBvs.getminy(idx)); //atomicMinD(&extBvs.minz(extId), primBvs.getminz(idx)); //atomicMaxD(&extBvs.maxx(extId), primBvs.getmaxx(idx)); //atomicMaxD(&extBvs.maxy(extId), primBvs.getmaxy(idx)); //atomicMaxD(&extBvs.maxz(extId), primBvs.getmaxz(idx)); // restr primitive if (_primRestrMarks[pid]) { _prims.vida(pid) = _faces[idx].x; _prims.vidb(pid) = _faces[idx].y; _prims.vidc(pid) = _faces[idx].z; //_prims.idx(pid) = idx; //_prims.type(pid) = static_cast<uchar>(ModelType::FixedDeformableType); const auto c = bv.center(); const auto offset = c - scene->_min; _prims.mtcode(pid) = morton3D(offset.x / scene->width(), offset.y / scene->height(), offset.z / scene->depth()); } } __global__ void restrIntNodes(int extSize, int numRtExtNode, const int *_restrExtNodes, const uint *_prevTkMarks, const int *_leafRestrRoots, uint *_depths, int *_localLcas, BvhExtNodeCompletePort _lvs, BvhIntNodeCompletePort _tks) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numRtExtNode) return; idx = _restrExtNodes[idx]; _localLcas[idx] = -1; //, _lvs.rcl(idx) = 0; const int subtreeRoot = _leafRestrRoots[idx]; if (subtreeRoot == 0) return; ///< actually not necessary, skipped from the beginning int l = idx - 1, r = idx; ///< (l, r] bool mark; char bdmark = (l < 0 || _leafRestrRoots[l] != subtreeRoot) << 1 | (r == extSize - 1 || _leafRestrRoots[r + 1] != subtreeRoot); if (bdmark) mark = bdmark & 1; else mark = _lvs.getmetric(l) < _lvs.getmetric(r); ///< true when right child, false otherwise int cur = mark ? l : r; _lvs.par(idx) = cur; if (mark) _tks.rc(cur) = idx, _tks.rangey(cur) = idx, atomicOr(&_tks.mark(cur), 0x00000002) , _lvs.mark(idx) = 0x00000007; else _tks.lc(cur) = idx, _tks.rangex(cur) = idx, atomicOr(&_tks.mark(cur), 0x00000001) , _lvs.mark(idx) = 0x00000003; while (atomicAdd(&_tks.flag(cur), 1) == 1) { _tks.refit(cur, _lvs); _tks.mark(cur) &= 0x00000007; l = _tks.rangex(cur) - 1, r = _tks.rangey(cur); _localLcas[l + 1] = cur, _depths[l + 1]++; bdmark = (l < 0 || _leafRestrRoots[l] != subtreeRoot) << 1 | (r == extSize - 1 || _leafRestrRoots[r + 1] != subtreeRoot); if (bdmark) mark = bdmark & 1; else mark = _lvs.getmetric(l) < _lvs.getmetric(r); if (bdmark == 3) { /// relationship with father if (_prevTkMarks[subtreeRoot] & 4) _tks.mark(cur) |= 0x00000004; else _tks.mark(cur) &= 0xFFFFFFFB; _tks.par(cur) = -1; ///< sentinel mark, no need modify mapping break; } int par = mark ? 
l : r; _tks.par(cur) = par; if (mark) _tks.rc(par) = cur, _tks.rangey(par) = r, atomicAnd(&_tks.mark(par), 0xFFFFFFFD), _tks.mark(cur) |= 0x00000004; else _tks.lc(par) = cur, _tks.rangex(par) = l + 1, atomicAnd(&_tks.mark(par), 0xFFFFFFFE), _tks.mark(cur) &= 0xFFFFFFFB; cur = par; } } __global__ void calcRestrIntNodeOrders(int numRtExtNode, const int *_restrExtNodes, const uint *_depths, const uint *_offsets, const int *_subtreeRoots, const int* _prevLbds, const uint *_prevTkMarks, const int* _localLcas, int *_lcas, int *_pars, BvhIntNodeCompletePort _unorderedTks, int *_tkMap, int *_sequence) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numRtExtNode) return; idx = _restrExtNodes[idx]; ///< leaf (ext node) index int node = _localLcas[idx]; ///< index of the unsorted trunk if (node != -1) { int depth = _subtreeRoots[idx], pos = _offsets[idx]; ///< first work as the root of the restr subtree /// for now, the rangex of the original subtree root is that of the newly built _unorderedTks int newId = depth + pos - _offsets[_prevLbds[depth]]; //if (_lcas[idx] != depth << 1) //if (!(_prevTkMarks[depth] & 4 == 0 && _prevLbds[_lcas[idx] >> 1] == _prevLbds[newId])) if (_prevTkMarks[depth] & 4 || idx != _prevLbds[depth]) _lcas[idx] = newId << 1; for (depth = _depths[idx]; depth--; node = _unorderedTks.getlc(node)) { if (_unorderedTks.getmark(node) & 2) { _pars[_unorderedTks.getrc(node)] = newId; } _tkMap[node] = newId++; _sequence[pos++] = node; } _pars[idx] = newId - 1; } else { _lcas[idx] = idx << 1 | 1; } } __global__ void reorderRestrIntNodes(int numRtIntNode, const int *_restrIntNodes, const int *_tkMap, BvhIntNodeCompletePort _unorderedTks, BvhIntNodeCompletePort _tks) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= numRtIntNode) return; idx = _restrIntNodes[idx]; int newId = _tkMap[idx]; uint mark = _unorderedTks.getmark(idx); _tks.mark(newId) = mark; _tks.lc(newId) = mark & 1 ? _unorderedTks.getlc(idx) : _tkMap[_unorderedTks.getlc(idx)]; _tks.rc(newId) = mark & 2 ? _unorderedTks.getrc(idx) : _tkMap[_unorderedTks.getrc(idx)]; if ((mark = _unorderedTks.getpar(idx)) != -1) _tks.par(newId) = _tkMap[mark]; _tks.rangex(newId) = _unorderedTks.getrangex(idx); _tks.rangey(newId) = _unorderedTks.getrangey(idx); //_tks.rcd(newId) = _rcls[mark] - _unorderedTks.getrcd(idx); _tks.setBV(newId, _unorderedTks, idx); } }
c1c4342149258dcdb2c08ee4443fb2da56d042ae.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
c1c4342149258dcdb2c08ee4443fb2da56d042ae.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
db25563987989927ef42d17a01721802fa2342c7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define THREADS 10

// PUT IT IN GLOBAL MEMORY
// Scalar product: each thread multiplies one element pair and accumulates the
// per-row product and the global sum with atomics.
__global__ void prodEscalar(int* A, int* B, int* prodEsc, int* somaDosProd, int dim)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < dim && j < dim) {
        int produto = A[i*dim + j] * B[i*dim + j];
        atomicAdd(prodEsc + i, produto);
        atomicAdd(somaDosProd, produto);
    }
}

// Sums all elements of a vector into *soma (helper kernel, not used by main).
__global__ void soma_elementos(int *vetorA, int *soma, int tam)
{
    // Compute the global thread index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < tam) {
        // Add the element at index idx into the accumulator
        atomicAdd(soma, vetorA[idx]);
    }
}

// Finds the global minimum and maximum over both matrices.
__global__ void min_max_elementos(int *A, int *B, int *min_out, int *max_out, int dim)
{
    // Compute the global thread indices
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < dim && j < dim) {
        int a = A[i*dim + j];
        int b = B[i*dim + j];
        atomicMax(max_out, a > b ? a : b);
        atomicMin(min_out, a < b ? a : b);
    }
}

// Matrices A and B live in global memory
int main(int argc, char **argv)
{
    // Declare the matrices
    int *A, *B;
    // Declare the index variables
    int i, j, dim;
    // Declare the accumulators for the global scalar product, minimum and maximum
    int somape, minimo, maximo;
    // Declare a vector for the per-row scalar products
    int *prod_escalar;
    FILE *inputfile;     // handle for the input file
    char *inputfilename; // name of the input file

    if (argc < 2) {
        printf("Please run with input file name, i.e., num_perf_mpi inputfile.ext\n");
        exit(-1);
    }
    inputfilename = (char *)malloc(256 * sizeof(char));
    strcpy(inputfilename, argv[1]);
    printf("inputfilename=%s\n", inputfilename);
    fflush(0);
    if ((inputfile = fopen(inputfilename, "r")) == 0) {
        printf("Error openning input file.\n");
        exit(-1);
    }

    fscanf(inputfile, "%d\n", &dim); // Read the dimension of the matrices

    // Allocate the matrices
    A = (int *)malloc(dim * dim * sizeof(int));
    B = (int *)malloc(dim * dim * sizeof(int));
    // Allocate a vector to store the scalar product of each row
    prod_escalar = (int *)malloc(dim * sizeof(int));

    // Read matrix A
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            fscanf(inputfile, "%d ", &(A[i * dim + j]));
    // Read matrix B
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            fscanf(inputfile, "%d ", &(B[i * dim + j]));
    // Close the input file
    fclose(inputfile);

    // Allocate the matrices on the GPU and copy the host data to the device
    int *A_d, *B_d;
    hipMalloc(&A_d, dim * dim * sizeof(int));
    hipMalloc(&B_d, dim * dim * sizeof(int));
    hipMemcpy(A_d, A, dim * dim * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(B_d, B, dim * dim * sizeof(int), hipMemcpyHostToDevice);

    // Device-side accumulators
    int *min_D, *max_D, *prod_D, *soma_D;
    hipMalloc(&min_D, sizeof(int));
    hipMalloc(&max_D, sizeof(int));
    hipMalloc(&prod_D, dim * sizeof(int));
    hipMalloc(&soma_D, sizeof(int));

    // Initialize the accumulators: min/max start at A[0], sums start at zero
    hipMemcpy(min_D, &A[0], sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(max_D, &A[0], sizeof(int), hipMemcpyHostToDevice);
    hipMemset(prod_D, 0, dim * sizeof(int));
    hipMemset(soma_D, 0, sizeof(int));

    // Define the number of threads per block
    dim3 threadsPerBlock(THREADS, THREADS);
    // Define the number of blocks per grid
    dim3 blocksPerGrid((dim + (threadsPerBlock.x - 1)) / threadsPerBlock.x,
                       (dim + (threadsPerBlock.y - 1)) / threadsPerBlock.y);

    hipLaunchKernelGGL(min_max_elementos, blocksPerGrid, threadsPerBlock, 0, 0,
                       A_d, B_d, min_D, max_D, dim);
    hipLaunchKernelGGL(prodEscalar, blocksPerGrid, threadsPerBlock, 0, 0,
                       A_d, B_d, prod_D, soma_D, dim);

    // Copy the results back to the host
    hipMemcpy(prod_escalar, prod_D, dim * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&somape, soma_D, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&minimo, min_D, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&maximo, max_D, sizeof(int), hipMemcpyDeviceToHost);

    /*
    // Serial reference version:
    // Compute the scalar product of each row
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            prod_escalar[j] += A[i * dim + j] * B[i * dim + j];
    // Accumulate the per-row products (global scalar product)
    somape = 0;
    for (i = 0; i < dim; i++)
        somape += prod_escalar[i];
    // Find the minimum of matrix A
    minimo = A[0];
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (A[i * dim + j] < minimo) minimo = A[i * dim + j];
    // Find the minimum of matrix B
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (B[i * dim + j] < minimo) minimo = B[i * dim + j];
    // Find the maximum of matrix A
    maximo = A[0];
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (A[i * dim + j] > maximo) maximo = A[i * dim + j];
    // Find the maximum of matrix B
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (B[i * dim + j] > maximo) maximo = B[i * dim + j];
    */

    // Print the result
    printf("%d %d %d\n", somape, minimo, maximo);

    // Free the matrices
    free(A);
    free(B);
    hipFree(A_d);
    hipFree(B_d);
    hipFree(prod_D);
    hipFree(soma_D);
    hipFree(min_D);
    hipFree(max_D);
    // Free the vector
    free(prod_escalar);
}
db25563987989927ef42d17a01721802fa2342c7.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define THREADS 10

// PUT IT IN GLOBAL MEMORY
// Scalar product: each thread multiplies one element pair and accumulates the
// per-row product and the global sum with atomics.
__global__ void prodEscalar(int* A, int* B, int* prodEsc, int* somaDosProd, int dim)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < dim && j < dim) {
        int produto = A[i*dim + j] * B[i*dim + j];
        atomicAdd(prodEsc + i, produto);
        atomicAdd(somaDosProd, produto);
    }
}

// Sums all elements of a vector into *soma (helper kernel, not used by main).
__global__ void soma_elementos(int *vetorA, int *soma, int tam)
{
    // Compute the global thread index
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < tam) {
        // Add the element at index idx into the accumulator
        atomicAdd(soma, vetorA[idx]);
    }
}

// Finds the global minimum and maximum over both matrices.
__global__ void min_max_elementos(int *A, int *B, int *min_out, int *max_out, int dim)
{
    // Compute the global thread indices
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < dim && j < dim) {
        int a = A[i*dim + j];
        int b = B[i*dim + j];
        atomicMax(max_out, a > b ? a : b);
        atomicMin(min_out, a < b ? a : b);
    }
}

// Matrices A and B live in global memory
int main(int argc, char **argv)
{
    // Declare the matrices
    int *A, *B;
    // Declare the index variables
    int i, j, dim;
    // Declare the accumulators for the global scalar product, minimum and maximum
    int somape, minimo, maximo;
    // Declare a vector for the per-row scalar products
    int *prod_escalar;
    FILE *inputfile;     // handle for the input file
    char *inputfilename; // name of the input file

    if (argc < 2) {
        printf("Please run with input file name, i.e., num_perf_mpi inputfile.ext\n");
        exit(-1);
    }
    inputfilename = (char *)malloc(256 * sizeof(char));
    strcpy(inputfilename, argv[1]);
    printf("inputfilename=%s\n", inputfilename);
    fflush(0);
    if ((inputfile = fopen(inputfilename, "r")) == 0) {
        printf("Error openning input file.\n");
        exit(-1);
    }

    fscanf(inputfile, "%d\n", &dim); // Read the dimension of the matrices

    // Allocate the matrices
    A = (int *)malloc(dim * dim * sizeof(int));
    B = (int *)malloc(dim * dim * sizeof(int));
    // Allocate a vector to store the scalar product of each row
    prod_escalar = (int *)malloc(dim * sizeof(int));

    // Read matrix A
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            fscanf(inputfile, "%d ", &(A[i * dim + j]));
    // Read matrix B
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            fscanf(inputfile, "%d ", &(B[i * dim + j]));
    // Close the input file
    fclose(inputfile);

    // Allocate the matrices on the GPU and copy the host data to the device
    int *A_d, *B_d;
    cudaMalloc(&A_d, dim * dim * sizeof(int));
    cudaMalloc(&B_d, dim * dim * sizeof(int));
    cudaMemcpy(A_d, A, dim * dim * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, dim * dim * sizeof(int), cudaMemcpyHostToDevice);

    // Device-side accumulators
    int *min_D, *max_D, *prod_D, *soma_D;
    cudaMalloc(&min_D, sizeof(int));
    cudaMalloc(&max_D, sizeof(int));
    cudaMalloc(&prod_D, dim * sizeof(int));
    cudaMalloc(&soma_D, sizeof(int));

    // Initialize the accumulators: min/max start at A[0], sums start at zero
    cudaMemcpy(min_D, &A[0], sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(max_D, &A[0], sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(prod_D, 0, dim * sizeof(int));
    cudaMemset(soma_D, 0, sizeof(int));

    // Define the number of threads per block
    dim3 threadsPerBlock(THREADS, THREADS);
    // Define the number of blocks per grid
    dim3 blocksPerGrid((dim + (threadsPerBlock.x - 1)) / threadsPerBlock.x,
                       (dim + (threadsPerBlock.y - 1)) / threadsPerBlock.y);

    min_max_elementos<<<blocksPerGrid, threadsPerBlock>>>(A_d, B_d, min_D, max_D, dim);
    prodEscalar<<<blocksPerGrid, threadsPerBlock>>>(A_d, B_d, prod_D, soma_D, dim);

    // Copy the results back to the host
    cudaMemcpy(prod_escalar, prod_D, dim * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&somape, soma_D, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&minimo, min_D, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&maximo, max_D, sizeof(int), cudaMemcpyDeviceToHost);

    /*
    // Serial reference version:
    // Compute the scalar product of each row
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            prod_escalar[j] += A[i * dim + j] * B[i * dim + j];
    // Accumulate the per-row products (global scalar product)
    somape = 0;
    for (i = 0; i < dim; i++)
        somape += prod_escalar[i];
    // Find the minimum of matrix A
    minimo = A[0];
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (A[i * dim + j] < minimo) minimo = A[i * dim + j];
    // Find the minimum of matrix B
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (B[i * dim + j] < minimo) minimo = B[i * dim + j];
    // Find the maximum of matrix A
    maximo = A[0];
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (A[i * dim + j] > maximo) maximo = A[i * dim + j];
    // Find the maximum of matrix B
    for (i = 0; i < dim; i++)
        for (j = 0; j < dim; j++)
            if (B[i * dim + j] > maximo) maximo = B[i * dim + j];
    */

    // Print the result
    printf("%d %d %d\n", somape, minimo, maximo);

    // Free the matrices
    free(A);
    free(B);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(prod_D);
    cudaFree(soma_D);
    cudaFree(min_D);
    cudaFree(max_D);
    // Free the vector
    free(prod_escalar);
}
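// Design note: prodEscalar above issues one global atomicAdd per element. A common
// alternative reduces within each block in shared memory first and does one atomic
// per block. Rough sketch, assuming a 1-D launch with 256-thread blocks
// (reduce_dot is an illustrative name, not from this file):
__global__ void reduce_dot(const int* A, const int* B, int* sum, int n)
{
    __shared__ int partial[256];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;
    partial[tid] = (idx < n) ? A[idx] * B[idx] : 0;
    __syncthreads();
    // tree reduction within the block
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) partial[tid] += partial[tid + stride];
        __syncthreads();
    }
    // one atomic per block instead of one per element
    if (tid == 0) atomicAdd(sum, partial[0]);
}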
fe33e627906c47c4369003e00515c876773b3ead.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudamat_kernels.cuh" #include "float.h" /* ------------------------- Random number generation ------------------------- */ __global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // The initial x is the seed and the initial carry is 1 unsigned long long rndWord = ((unsigned long long)seed << 32) + 1; const unsigned int rndMult = rndMults[idx]; /* * Run the chain for a few steps so that all the streams have a chance * to differentiate. They start out generating similar random numbers * because all the multipliers are similar. */ for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); } rndWords[idx] = rndWord; } __global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; } rndWords[idx] = rndWord; } __global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; float rnd1, rnd2, R, T; for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; T = 2 * PI * rnd2; R = sqrtf(-2 * __logf(rnd1)); gData[i] = R * __cosf(T); if (i + NUM_RND_STREAMS < numElements) gData[i + NUM_RND_STREAMS] = R * __sinf(T); } rndWords[idx] = rndWord; } /* ------------------------- Data copying ------------------------- */ /* Copy row slice from source to target. There is a block for every 32x32 chunk being copied. */ __global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? start_col + 32: width; const int target_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * target_height + row - start] = source[cur_col * height + row]; } } __global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? 
start_col + 32: width; const int source_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * height + row] = source[cur_col * source_height + row - start]; //source[cur_col * height + row - start] = target[cur_col * target_height + row]; } } __global__ void kTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } /* ------------------------- Mathematical operations ------------------------- */ __global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] < mat2[i]; } } __global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] < val; } } __global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] > mat2[i]; } } __global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] > val; } } __global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { __shared__ float max_vals[32]; float cur_max = -FLT_MAX; float val = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { val = mat[blockIdx.x * height + i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; __syncthreads(); if (threadIdx.x == 0) { cur_max = -FLT_MAX; for (unsigned int i = 0; i < 32; i++) if (max_vals[i] > cur_max) cur_max = max_vals[i]; target[blockIdx.x] = cur_max; } } __global__ void kSign(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] ? 
copysignf(1., mat[i]) : 0.; } } __global__ void kApplySigmoid(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = 1 / (1 + __expf(-mat[i])); } } __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } } __global__ void kApplyAbs(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0)); } } __global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; if (mat_i > 0) target[i] = (__logf(1 + __expf(-mat_i)) + mat_i); else target[i] = __logf(1 + __expf(mat_i)); } } __global__ void kLog(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __logf(mat[i]); } } __global__ void kExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __expf(mat[i]); } } __global__ void kSqrt(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } } __global__ void kPow(float* mat, float pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow); } } __global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow[i]); } } __global__ void kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. 
/ mat[i]; } __global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i % height]; } } __global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i / height]; } } __global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + mult * vec[i % height]; } } __global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i % height]; } } __global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i / height]; } } __global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] + b[i]; } } __global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] - b[i]; } } __global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] / b[i]; } } __global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } } __global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha * mat[i]; } } __global__ void kAssignScalar(float* dest, float alpha, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha; } } __global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = 
blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = mat[i] / alpha; } } __global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] + alpha; } } __global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){ __shared__ int sourceRowIndices[32]; const int startTargetRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startTargetRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ sourceRowIndices[tid] = int(indices[startTargetRowI + tid]); if (sourceRowIndices[tid]<0) sourceRowIndices[tid] += nSourceRows; if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows) sourceRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } } __global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){ __shared__ int targetRowIndices[32]; const int startSourceRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startSourceRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ targetRowIndices[tid] = int(indices[startSourceRowI + tid]); if (targetRowIndices[tid]<0) targetRowIndices[tid] += nTargetRows; if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows) targetRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } }
fe33e627906c47c4369003e00515c876773b3ead.cu
#include "cudamat_kernels.cuh" #include "float.h" /* ------------------------- Random number generation ------------------------- */ __global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // The initial x is the seed and the initial carry is 1 unsigned long long rndWord = ((unsigned long long)seed << 32) + 1; const unsigned int rndMult = rndMults[idx]; /* * Run the chain for a few steps so that all the streams have a chance * to differentiate. They start out generating similar random numbers * because all the multipliers are similar. */ for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); } rndWords[idx] = rndWord; } __global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; } rndWords[idx] = rndWord; } __global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; float rnd1, rnd2, R, T; for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; T = 2 * PI * rnd2; R = sqrtf(-2 * __logf(rnd1)); gData[i] = R * __cosf(T); if (i + NUM_RND_STREAMS < numElements) gData[i + NUM_RND_STREAMS] = R * __sinf(T); } rndWords[idx] = rndWord; } /* ------------------------- Data copying ------------------------- */ /* Copy row slice from source to target. There is a block for every 32x32 chunk being copied. */ __global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? start_col + 32: width; const int target_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * target_height + row - start] = source[cur_col * height + row]; } } __global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? 
start_col + 32: width; const int source_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * height + row] = source[cur_col * source_height + row - start]; //source[cur_col * height + row - start] = target[cur_col * target_height + row]; } } __global__ void kTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } /* ------------------------- Mathematical operations ------------------------- */ __global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] < mat2[i]; } } __global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] < val; } } __global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] > mat2[i]; } } __global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] > val; } } __global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { __shared__ float max_vals[32]; float cur_max = -FLT_MAX; float val = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { val = mat[blockIdx.x * height + i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; __syncthreads(); if (threadIdx.x == 0) { cur_max = -FLT_MAX; for (unsigned int i = 0; i < 32; i++) if (max_vals[i] > cur_max) cur_max = max_vals[i]; target[blockIdx.x] = cur_max; } } __global__ void kSign(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] ? 
copysignf(1., mat[i]) : 0.; } } __global__ void kApplySigmoid(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = 1 / (1 + __expf(-mat[i])); } } __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } } __global__ void kApplyAbs(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0)); } } __global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; if (mat_i > 0) target[i] = (__logf(1 + __expf(-mat_i)) + mat_i); else target[i] = __logf(1 + __expf(mat_i)); } } __global__ void kLog(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __logf(mat[i]); } } __global__ void kExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __expf(mat[i]); } } __global__ void kSqrt(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } } __global__ void kPow(float* mat, float pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow); } } __global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow[i]); } } __global__ void kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. 
/ mat[i]; } __global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i % height]; } } __global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i / height]; } } __global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + mult * vec[i % height]; } } __global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i % height]; } } __global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i / height]; } } __global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] + b[i]; } } __global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] - b[i]; } } __global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] / b[i]; } } __global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } } __global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha * mat[i]; } } __global__ void kAssignScalar(float* dest, float alpha, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha; } } __global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = 
blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = mat[i] / alpha; } } __global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] + alpha; } } __global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){ __shared__ int sourceRowIndices[32]; const int startTargetRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startTargetRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ sourceRowIndices[tid] = int(indices[startTargetRowI + tid]); if (sourceRowIndices[tid]<0) sourceRowIndices[tid] += nSourceRows; if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows) sourceRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } } __global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){ __shared__ int targetRowIndices[32]; const int startSourceRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startSourceRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ targetRowIndices[tid] = int(indices[startSourceRowI + tid]); if (targetRowIndices[tid]<0) targetRowIndices[tid] += nTargetRows; if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows) targetRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } }
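Editor's note: every elementwise kernel in the file above (kAdd, kMult, kLessThan, and so on) uses the same grid-stride loop, so the launch configuration only changes how work is partitioned, not which elements get processed. A minimal host-side driver for kAdd might look like the sketch below; the wrapper name, thread count, and the absence of error checking are illustrative assumptions, not part of the original file, and the sketch assumes it is compiled in the same translation unit as the kernels above.

#include <cuda_runtime.h>

// Hypothetical driver for the kAdd kernel defined above. Block/grid sizes are
// assumed values; the grid-stride loop inside kAdd covers all 'len' elements
// for any launch shape.
void add_on_device(const float* h_a, const float* h_b, float* h_out, unsigned int len)
{
    float *d_a = nullptr, *d_b = nullptr, *d_out = nullptr;
    const size_t bytes = size_t(len) * sizeof(float);
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    const unsigned int threads = 256;                         // assumed block size
    const unsigned int blocks  = (len + threads - 1) / threads;
    kAdd<<<blocks, threads>>>(d_a, d_b, d_out, len);          // dest[i] = a[i] + b[i]

    cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
}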
cfb5e1a3e5cd44980dc13732f71f4c3424b9f2ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 2 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; if (abs(a[idx]-b[idx]) > 1e-5) { cout << a[idx] << " " << b[idx] << endl; printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; if(i>=0 && i<nx && j>=0 && j<ny){ //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if(i>=0 && i<nx && j>=0 && j<ny){ #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if(i>=0 && i<nx && j>=0 && j<ny){ #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); int num = 8; int NZ_ = NZ/num+2; if (NX*NY*NZ <= 600*600*600) { num = 1; NZ_ = NZ; } int p1, p2; if (NZ % num == 0) { p1 = p2 = NZ/num; } else { p1 = NZ / (num-1); p2 = NZ - p1*(num-1); } //int size = sizeof(REAL)*NX*NY*NZ; int partsize1 = NX*NY*p1; int partsize2 = NX*NY*p2; REAL *host_A, *host_B; int totalsize; if (num == 1) { totalsize = NX*NY*NZ; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } else { totalsize = (partsize1+2*NX*NY)*(num-1)+partsize2; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } int size_ = NZ_*NY*NX; REAL *cpu_A = new REAL[NX*NY*NZ]; REAL *result_A = new REAL[NX*NY*NZ]; REAL *cpu_B = new REAL[NX*NY*NZ]; for (int i = 0; i < totalsize; ++i) { host_A[i] = 1.0; host_B[i] = 1.0; } //for (int k = 0; k < NZ; k++) // for (int j = 0; j < NY; j++) // for (int i = 0; i < NX; i++) { // host_A[k*NY*NX+j*NX+i] = 1.0; // host_B[k*NY*NX+j*NX+i] = 1.0; // } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 1.0; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); float elapsed_time; float elapsed_timecopy; double flops; int index = 0; int partsize; cout << "start gpu computing..." 
<< endl; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; if (i == 0) { partsize = partsize1+NX*NY; NZ_ = p1+1; } else if (i < num-1) { partsize = partsize1+2*NX*NY; NZ_ = p1+2; } else { partsize = partsize2+NX*NY; NZ_ = p2+1; } if (num == 1) { partsize = NX*NY*NZ; NZ_ = NZ; } hipMalloc(&dev_A, sizeof(REAL)*partsize); hipMalloc(&dev_B, sizeof(REAL)*partsize); // hipEvent_t startcopy,stopcopy; // hipEventCreate(&startcopy); // hipEventCreate(&stopcopy); // hipEventRecord(startcopy, 0); hipMemcpy(dev_A, host_A+index, sizeof(REAL)*partsize, hipMemcpyHostToDevice); hipMemcpy(dev_B, host_B+index, sizeof(REAL)*partsize, hipMemcpyHostToDevice); // hipEventRecord(stopcopy,0); // hipEventSynchronize(stopcopy); // hipEventElapsedTime(&elapsed_timecopy, startcopy, stopcopy); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline for (int t = 0; t < T; t++){ hipLaunchKernelGGL(( roc), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (hipGetLastError() != hipSuccess) printf("cudawrong!!!\n"); hipMemcpy(host_A+index, dev_A, sizeof(REAL)*partsize, hipMemcpyDeviceToHost); index += partsize; hipFree(dev_A); hipFree(dev_B); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); struct timeval t1, t2; gettimeofday(&t1, NULL); // stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; /* if (num == 1) { check(cpu_A, host_A, NX, NY, NZ); } else { int index=0, partsize=0, idx=0; for (int i = 0; i < num; ++i) { if (i < num-1) partsize = partsize1; else partsize = partsize2; for (int j = 0; j < partsize; ++j) { result_A[idx] = host_A[index+j]; idx++; } index += partsize+2*NX*NY; } check(cpu_A, result_A, NX, NY, NZ); }*/ //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time); // printf("baseline: elapsed timecopy = %f ms\n", elapsed_timecopy*num); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt hipEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("baseopt: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache hipEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("read-only data cache: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time); flops = 
1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw hipEventRecord(start, 0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("share memory raw: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// hipEventDestroy(start); hipEventDestroy(stop); */ return 0; }
cfb5e1a3e5cd44980dc13732f71f4c3424b9f2ee.cu
#include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 2 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; if (abs(a[idx]-b[idx]) > 1e-5) { cout << a[idx] << " " << b[idx] << endl; printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; if(i>=0 && i<nx && j>=0 && j<ny){ //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if(i>=0 && i<nx && j>=0 && j<ny){ #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; if(i>=0 && i<nx && j>=0 && j<ny){ #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); int num = 8; int NZ_ = NZ/num+2; if (NX*NY*NZ <= 600*600*600) { num = 1; NZ_ = NZ; } int p1, p2; if (NZ % num == 0) { p1 = p2 = NZ/num; } else { p1 = NZ / (num-1); p2 = NZ - p1*(num-1); } //int size = sizeof(REAL)*NX*NY*NZ; int partsize1 = NX*NY*p1; int partsize2 = NX*NY*p2; REAL *host_A, *host_B; int totalsize; if (num == 1) { totalsize = NX*NY*NZ; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } else { totalsize = (partsize1+2*NX*NY)*(num-1)+partsize2; host_A = new REAL[totalsize]; host_B = new REAL[totalsize]; } int size_ = NZ_*NY*NX; REAL *cpu_A = new REAL[NX*NY*NZ]; REAL *result_A = new REAL[NX*NY*NZ]; REAL *cpu_B = new REAL[NX*NY*NZ]; for (int i = 0; i < totalsize; ++i) { host_A[i] = 1.0; host_B[i] = 1.0; } //for (int k = 0; k < NZ; k++) // for (int j = 0; j < NY; j++) // for (int i = 0; i < NX; i++) { // host_A[k*NY*NX+j*NX+i] = 1.0; // host_B[k*NY*NX+j*NX+i] = 1.0; // } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 1.0; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); float elapsed_time; float elapsed_timecopy; double flops; int index = 0; int partsize; cout << "start gpu computing..." 
<< endl; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; if (i == 0) { partsize = partsize1+NX*NY; NZ_ = p1+1; } else if (i < num-1) { partsize = partsize1+2*NX*NY; NZ_ = p1+2; } else { partsize = partsize2+NX*NY; NZ_ = p2+1; } if (num == 1) { partsize = NX*NY*NZ; NZ_ = NZ; } cudaMalloc(&dev_A, sizeof(REAL)*partsize); cudaMalloc(&dev_B, sizeof(REAL)*partsize); // cudaEvent_t startcopy,stopcopy; // cudaEventCreate(&startcopy); // cudaEventCreate(&stopcopy); // cudaEventRecord(startcopy, 0); cudaMemcpy(dev_A, host_A+index, sizeof(REAL)*partsize, cudaMemcpyHostToDevice); cudaMemcpy(dev_B, host_B+index, sizeof(REAL)*partsize, cudaMemcpyHostToDevice); // cudaEventRecord(stopcopy,0); // cudaEventSynchronize(stopcopy); // cudaEventElapsedTime(&elapsed_timecopy, startcopy, stopcopy); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (cudaGetLastError() != cudaSuccess) printf("cudawrong!!!\n"); cudaMemcpy(host_A+index, dev_A, sizeof(REAL)*partsize, cudaMemcpyDeviceToHost); index += partsize; cudaFree(dev_A); cudaFree(dev_B); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); struct timeval t1, t2; gettimeofday(&t1, NULL); // stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; /* if (num == 1) { check(cpu_A, host_A, NX, NY, NZ); } else { int index=0, partsize=0, idx=0; for (int i = 0; i < num; ++i) { if (i < num-1) partsize = partsize1; else partsize = partsize2; for (int j = 0; j < partsize; ++j) { result_A[idx] = host_A[index+j]; idx++; } index += partsize+2*NX*NY; } check(cpu_A, result_A, NX, NY, NZ); }*/ //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time); // printf("baseline: elapsed timecopy = %f ms\n", elapsed_timecopy*num); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("baseopt: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("read-only data cache: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time); flops = 
1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("share memory raw: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// cudaEventDestroy(start); cudaEventDestroy(stop); */ return 0; }
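Editor's note: the harness above splits the volume into num chunks along Z and pads each device buffer with ghost slices (one for the first and last chunk, two for interior chunks). The standalone check below reproduces that arithmetic with assumed example sizes rather than values from the source, and only exercises the num = 8 path, not the num = 1 fallback.

#include <cstdio>

int main()
{
    // Assumed example problem size; large enough that the harness above would
    // keep num = 8 rather than falling back to a single chunk.
    const int NX = 512, NY = 512, NZ = 1024, num = 8;

    // Same chunk-size computation as in main() above.
    int p1, p2;
    if (NZ % num == 0) { p1 = p2 = NZ / num; }
    else               { p1 = NZ / (num - 1); p2 = NZ - p1 * (num - 1); }

    long long total_slices = 0;
    for (int i = 0; i < num; ++i) {
        // First and last chunks carry one ghost slice, interior chunks two,
        // mirroring the partsize/NZ_ updates inside the device loop above.
        const int body   = (i == num - 1) ? p2 : p1;
        const int ghosts = (i == 0 || i == num - 1) ? 1 : 2;
        const int slices = body + ghosts;
        total_slices += slices;
        printf("chunk %d: %d z-slices, %lld elements\n",
               i, slices, (long long)slices * NX * NY);
    }
    printf("total z-slices incl. ghosts: %lld (p1 = %d, p2 = %d)\n",
           total_slices, p1, p2);
    return 0;
}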
2f94074c77bca39a47407b030298dafd7fbefa9d.hip
// !!! This is a file automatically generated by hipify!!! #include "NeuralNet.h" #include <fstream> using namespace std; NeuralNet::NeuralNet(const unsigned int inputs, const unsigned int outputs, const unsigned int nLayers, const unsigned int neurons, const unsigned int batchSize) : numberOfLayers(nLayers), numberOfInputs(inputs), numberOfOutputs(outputs), neuronsPerLayer(neurons), outputLayer(nullptr, neurons, outputs, batchSize), batchSize(batchSize) { layers.reserve(numberOfLayers); createLayers(); outputLayer.d_inputs = layers.back().d_outputs; } void NeuralNet::createLayers() { layers.emplace_back(nullptr, numberOfInputs, neuronsPerLayer, batchSize); for (int i = 1; i < numberOfLayers; i++) { layers.emplace_back(layers.back().d_outputs, neuronsPerLayer, neuronsPerLayer, batchSize); } } double* NeuralNet::processInput(double* input) { layers[0].d_inputs = input; for (int i = 0; i < numberOfLayers; i++) { layers[i].processInput(); } outputLayer.processInput(); return outputLayer.d_outputs; } void NeuralNet::saveWeights(const std::string& fileName) { vector<double> weights; for_each(begin(layers), end(layers), [&](HiddenLayer& layer){ double* weightMatrix = new double[layer.numberOfNeurons*(layer.numberOfInputs + 1)]; CUDA_CALL(hipMemcpy(weightMatrix, layer.d_weightMatrix, layer.numberOfNeurons*(layer.numberOfInputs + 1)*sizeof(double), hipMemcpyDeviceToHost)); for (int neuron = 0; neuron < layer.numberOfNeurons; neuron++) { for (int weight = 0; weight < layer.numberOfInputs + 1; weight++) { weights.push_back(weightMatrix[neuron*(layer.numberOfInputs + 1) + weight]); } } delete[] weightMatrix; }); double* weightMatrix = new double[outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)]; CUDA_CALL(hipMemcpy(weightMatrix, outputLayer.d_weightMatrix, outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)*sizeof(double), hipMemcpyDeviceToHost)); for (int neuron = 0; neuron < outputLayer.numberOfNeurons; neuron++) { for (int weight = 0; weight < outputLayer.numberOfInputs + 1; weight++) { weights.push_back(weightMatrix[neuron*(outputLayer.numberOfInputs + 1) + weight]); } } delete[] weightMatrix; std::ofstream f(fileName); for_each( weights.begin(), weights.end(), [&](double x){ f << x << "\n";}); } void NeuralNet::readWeights(const std::string& fileName) { string str; vector<double> input; ifstream f(fileName); if(f.is_open()){ while(getline(f, str)) input.push_back(stod(str)); for_each(layers.begin(), layers.end(), [&](HiddenLayer& layer){ double* weightMatrix = new double[layer.numberOfNeurons*(layer.numberOfInputs + 1)]; for (int neuron = 0; neuron < layer.numberOfNeurons; neuron++) { for (int weight = 0; weight < layer.numberOfInputs + 1; weight++) { weightMatrix[neuron*(layer.numberOfInputs + 1) + weight] = input[0]; input.erase(input.begin()); } } CUDA_CALL(hipMemcpy(layer.d_weightMatrix, weightMatrix,layer.numberOfNeurons*(layer.numberOfInputs + 1)*sizeof(double), hipMemcpyHostToDevice)); delete[] weightMatrix; }); double* weightMatrix = new double[outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)]; for (int neuron = 0; neuron < outputLayer.numberOfNeurons; neuron++) { for (int weight = 0; weight < outputLayer.numberOfInputs + 1; weight++) { weightMatrix[neuron*(outputLayer.numberOfInputs + 1) + weight] = input[0]; input.erase(input.begin()); } } CUDA_CALL(hipMemcpy(outputLayer.d_weightMatrix, weightMatrix,outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)*sizeof(double), hipMemcpyHostToDevice)); delete[] weightMatrix; } else{ cout << "Could not read 
weights from file." << endl; } }
2f94074c77bca39a47407b030298dafd7fbefa9d.cu
#include "NeuralNet.h" #include <fstream> using namespace std; NeuralNet::NeuralNet(const unsigned int inputs, const unsigned int outputs, const unsigned int nLayers, const unsigned int neurons, const unsigned int batchSize) : numberOfLayers(nLayers), numberOfInputs(inputs), numberOfOutputs(outputs), neuronsPerLayer(neurons), outputLayer(nullptr, neurons, outputs, batchSize), batchSize(batchSize) { layers.reserve(numberOfLayers); createLayers(); outputLayer.d_inputs = layers.back().d_outputs; } void NeuralNet::createLayers() { layers.emplace_back(nullptr, numberOfInputs, neuronsPerLayer, batchSize); for (int i = 1; i < numberOfLayers; i++) { layers.emplace_back(layers.back().d_outputs, neuronsPerLayer, neuronsPerLayer, batchSize); } } double* NeuralNet::processInput(double* input) { layers[0].d_inputs = input; for (int i = 0; i < numberOfLayers; i++) { layers[i].processInput(); } outputLayer.processInput(); return outputLayer.d_outputs; } void NeuralNet::saveWeights(const std::string& fileName) { vector<double> weights; for_each(begin(layers), end(layers), [&](HiddenLayer& layer){ double* weightMatrix = new double[layer.numberOfNeurons*(layer.numberOfInputs + 1)]; CUDA_CALL(cudaMemcpy(weightMatrix, layer.d_weightMatrix, layer.numberOfNeurons*(layer.numberOfInputs + 1)*sizeof(double), cudaMemcpyDeviceToHost)); for (int neuron = 0; neuron < layer.numberOfNeurons; neuron++) { for (int weight = 0; weight < layer.numberOfInputs + 1; weight++) { weights.push_back(weightMatrix[neuron*(layer.numberOfInputs + 1) + weight]); } } delete[] weightMatrix; }); double* weightMatrix = new double[outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)]; CUDA_CALL(cudaMemcpy(weightMatrix, outputLayer.d_weightMatrix, outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)*sizeof(double), cudaMemcpyDeviceToHost)); for (int neuron = 0; neuron < outputLayer.numberOfNeurons; neuron++) { for (int weight = 0; weight < outputLayer.numberOfInputs + 1; weight++) { weights.push_back(weightMatrix[neuron*(outputLayer.numberOfInputs + 1) + weight]); } } delete[] weightMatrix; std::ofstream f(fileName); for_each( weights.begin(), weights.end(), [&](double x){ f << x << "\n";}); } void NeuralNet::readWeights(const std::string& fileName) { string str; vector<double> input; ifstream f(fileName); if(f.is_open()){ while(getline(f, str)) input.push_back(stod(str)); for_each(layers.begin(), layers.end(), [&](HiddenLayer& layer){ double* weightMatrix = new double[layer.numberOfNeurons*(layer.numberOfInputs + 1)]; for (int neuron = 0; neuron < layer.numberOfNeurons; neuron++) { for (int weight = 0; weight < layer.numberOfInputs + 1; weight++) { weightMatrix[neuron*(layer.numberOfInputs + 1) + weight] = input[0]; input.erase(input.begin()); } } CUDA_CALL(cudaMemcpy(layer.d_weightMatrix, weightMatrix,layer.numberOfNeurons*(layer.numberOfInputs + 1)*sizeof(double), cudaMemcpyHostToDevice)); delete[] weightMatrix; }); double* weightMatrix = new double[outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)]; for (int neuron = 0; neuron < outputLayer.numberOfNeurons; neuron++) { for (int weight = 0; weight < outputLayer.numberOfInputs + 1; weight++) { weightMatrix[neuron*(outputLayer.numberOfInputs + 1) + weight] = input[0]; input.erase(input.begin()); } } CUDA_CALL(cudaMemcpy(outputLayer.d_weightMatrix, weightMatrix,outputLayer.numberOfNeurons*(outputLayer.numberOfInputs + 1)*sizeof(double), cudaMemcpyHostToDevice)); delete[] weightMatrix; } else{ cout << "Could not read weights from file." << endl; } }
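Editor's note: NeuralNet.cu wraps its cudaMemcpy calls in a CUDA_CALL macro that is presumably defined in NeuralNet.h, which is not part of this listing. A typical definition is sketched below as an assumption about what that header provides, not as a quote from it.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative guess at CUDA_CALL; the real definition lives in NeuralNet.h.
#define CUDA_CALL(expr)                                                     \
    do {                                                                    \
        cudaError_t status_ = (expr);                                       \
        if (status_ != cudaSuccess) {                                       \
            fprintf(stderr, "CUDA call failed at %s:%d: %s\n",              \
                    __FILE__, __LINE__, cudaGetErrorString(status_));       \
        }                                                                   \
    } while (0)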
427bbf5aadf0364502c75cd71971f87fc0cf5777.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front; int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front; int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front; int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front; int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim0_update_halo_kernel2_xvel_plus_2_front * \ ydim0_update_halo_kernel2_xvel_plus_2_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim1_update_halo_kernel2_xvel_plus_2_front * \ ydim1_update_halo_kernel2_xvel_plus_2_front * (z)) // user function __device__ inline void update_halo_kernel2_xvel_plus_2_front(double *xvel0, double *xvel1, const int *fields) { if (fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)]; if (fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_xvel_plus_2_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front * ydim0_update_halo_kernel2_xvel_plus_2_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front * ydim1_update_halo_kernel2_xvel_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_xvel_plus_2_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_xvel_plus_2_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 80)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(80, "update_halo_kernel2_xvel_plus_2_front"); OPS_kernels[80].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - 
start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h || ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h || xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h || ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[80].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_front), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[80].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, 
&t2); OPS_kernels[80].mpi_time += t2 - t1; OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
427bbf5aadf0364502c75cd71971f87fc0cf5777.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front; int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front; int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front; int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front; int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim0_update_halo_kernel2_xvel_plus_2_front * \ ydim0_update_halo_kernel2_xvel_plus_2_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim1_update_halo_kernel2_xvel_plus_2_front * \ ydim1_update_halo_kernel2_xvel_plus_2_front * (z)) // user function __device__ inline void update_halo_kernel2_xvel_plus_2_front(double *xvel0, double *xvel1, const int *fields) { if (fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)]; if (fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_xvel_plus_2_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front * ydim0_update_halo_kernel2_xvel_plus_2_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front * ydim1_update_halo_kernel2_xvel_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_xvel_plus_2_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_xvel_plus_2_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 80)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(80, "update_halo_kernel2_xvel_plus_2_front"); OPS_kernels[80].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 
= args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h || ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h || xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h || ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[80].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_xvel_plus_2_front<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[80].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[80].mpi_time += t2 - t1; OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
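Editor's note: the OPS_ACC0/OPS_ACC1 macros above fold a relative (x, y, z) offset into a flat offset using the padded x and y extents cached in __constant__ memory; the halo kernel then copies the value two planes behind (z = -2) onto the boundary plane (z = 0). The standalone sketch below reproduces that index arithmetic on the host with made-up extents, purely for illustration.

#include <cstdio>

// Assumed padded extents, standing in for the __constant__ xdim0/ydim0 values.
static const int xdim0 = 8;
static const int ydim0 = 6;

// Host-side copy of the OPS_ACC0 index arithmetic used in the kernel above.
static int acc0(int x, int y, int z)
{
    return x + xdim0 * y + xdim0 * ydim0 * z;
}

int main()
{
    // In the kernel, arg0 has already been advanced to the current grid point,
    // so a negative z offset reaches an earlier plane of the same column.
    printf("offset of ( 0, 0,  0) = %d\n", acc0(0, 0, 0));    //   0
    printf("offset of ( 0, 0, -2) = %d\n", acc0(0, 0, -2));   // -96: two planes back
    printf("offset of ( 1, 2,  3) = %d\n", acc0(1, 2, 3));    // 161
    return 0;
}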
b122ab3a5ed699586b3f5753a341793b69da7f77.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/PinnedMemoryAllocator.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/HIPSolver.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPEvent.h> #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/hip/MiscUtils.h> #include <ATen/native/hip/BatchLinearAlgebraLib.h> #ifdef USE_CUSOLVER namespace at { namespace native { inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_ptr, int n) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, info_ptr); at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, self_inv_ptr, n, info_ptr); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto& allocator = *::c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); if (use_loop_launch(batch_size, n)) { int* p_infos = infos.data_ptr<int>(); auto main_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); for (int64_t i = 0; i < batch_size; i++) { auto stream = at::hip::getStreamFromPoolMasqueradingAsCUDA(); at::hip::HIPStreamGuardMasqueradingAsCUDA guard(stream); at::cuda::CUDAEvent can_start; can_start.record(main_stream); can_start.block(main_stream); int* pivot = reinterpret_cast<int*>(allocator.allocate(sizeof(int) * n).get()); _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, p_infos + i, n); at::cuda::CUDAEvent finished; finished.record(stream); finished.block(main_stream); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<long>(self_data), reinterpret_cast<long>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<long>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<long>(self_inv_data), reinterpret_cast<long>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<long>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); int* ipiv_array = reinterpret_cast<int*>(allocator.allocate(sizeof(int)*batch_size*n).get()); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, infos.data_ptr<int>(), batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, infos.data_ptr<int>(), batch_size, 
reinterpret_cast<scalar_t**>(self_inv_array.data_ptr())); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, int64_t& info) { int n = cuda_int_cast(self.size(-2), "self.size(-2)"); Tensor ipiv = at::empty({n}, self.options().dtype(at::kInt)); Tensor info_tmp = at::zeros({1}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), info_tmp.data_ptr<int>(), n); info = info_tmp.item<int>(); } Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos = at::zeros({batchCount(self)}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda"); } else { int64_t info = 0; AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, info); }); singleCheckErrors(info, "inverse_cuda"); } return self_inv_working_copy; } }} // namespace at::native #endif // USE_CUSOLVER
b122ab3a5ed699586b3f5753a341793b69da7f77.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/PinnedMemoryAllocator.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/CUDASolver.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAEvent.h> #include <c10/cuda/CUDAStream.h> #include <ATen/native/LinearAlgebraUtils.h> #include <ATen/native/cuda/MiscUtils.h> #include <ATen/native/cuda/BatchLinearAlgebraLib.h> #ifdef USE_CUSOLVER namespace at { namespace native { inline static Tensor column_major_identity_matrix_like(const Tensor& self) { auto size = self.sizes(); auto size_slice = IntArrayRef(size.data(), size.size()-1); return at::ones(size_slice, self.options()).diag_embed().transpose(-2, -1); } template <typename scalar_t> inline static void _apply_single_inverse_helper(scalar_t* self_ptr, scalar_t* self_inv_ptr, int* ipiv_ptr, int* info_ptr, int n) { // self_inv_ptr should already be an identity matrix auto handle = at::cuda::getCurrentCUDASolverDnHandle(); at::cuda::solver::getrf<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, info_ptr); at::cuda::solver::getrs<scalar_t>(handle, n, n, self_ptr, n, ipiv_ptr, self_inv_ptr, n, info_ptr); } template <typename scalar_t> static void apply_batched_inverse_lib(Tensor& self, Tensor& self_inv, Tensor& infos) { const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); const int n = cuda_int_cast(self.size(-2), "self.size(-2)"); auto self_data = self.data_ptr<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data_ptr<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); if (use_loop_launch(batch_size, n)) { int* p_infos = infos.data_ptr<int>(); auto main_stream = at::cuda::getCurrentCUDAStream(); for (int64_t i = 0; i < batch_size; i++) { auto stream = at::cuda::getStreamFromPool(); at::cuda::CUDAStreamGuard guard(stream); at::cuda::CUDAEvent can_start; can_start.record(main_stream); can_start.block(main_stream); int* pivot = reinterpret_cast<int*>(allocator.allocate(sizeof(int) * n).get()); _apply_single_inverse_helper<scalar_t>( &self_data[i * self_mat_stride], &self_inv_data[i * self_inv_mat_stride], pivot, p_infos + i, n); at::cuda::CUDAEvent finished; finished.record(stream); finished.block(main_stream); } } else { // cublas batched kernels require input be "device array of device pointers" Tensor self_array = at::arange( reinterpret_cast<long>(self_data), reinterpret_cast<long>(&self_data[(batch_size-1) * self_mat_stride]) + 1, static_cast<long>(self_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); Tensor self_inv_array = at::arange( reinterpret_cast<long>(self_inv_data), reinterpret_cast<long>(&self_inv_data[(batch_size-1) * self_inv_mat_stride]) + 1, static_cast<long>(self_inv_mat_stride * sizeof(scalar_t)), self.options().dtype(at::kLong)); int* ipiv_array = reinterpret_cast<int*>(allocator.allocate(sizeof(int)*batch_size*n).get()); at::cuda::blas::getrfBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, infos.data_ptr<int>(), batch_size); at::cuda::blas::getriBatched<scalar_t>(n, reinterpret_cast<scalar_t**>(self_array.data_ptr()), n, ipiv_array, infos.data_ptr<int>(), batch_size, reinterpret_cast<scalar_t**>(self_inv_array.data_ptr())); } } template <typename scalar_t> static void apply_single_inverse_lib(const Tensor& self, Tensor& self_inv, int64_t& info) { int 
n = cuda_int_cast(self.size(-2), "self.size(-2)"); Tensor ipiv = at::empty({n}, self.options().dtype(at::kInt)); Tensor info_tmp = at::zeros({1}, self.options().dtype(at::kInt)); _apply_single_inverse_helper<scalar_t>( self.data_ptr<scalar_t>(), self_inv.data_ptr<scalar_t>(), ipiv.data_ptr<int>(), info_tmp.data_ptr<int>(), n); info = info_tmp.item<int>(); } Tensor _inverse_helper_cuda_lib(const Tensor& self) { Tensor self_working_copy = cloneBatchedColumnMajor(self); Tensor self_inv_working_copy = column_major_identity_matrix_like(self_working_copy); const int batch_size = cuda_int_cast(batchCount(self), "batchCount"); if (self.dim() > 2 && batch_size > 1) { Tensor infos = at::zeros({batchCount(self)}, self.options().dtype(kInt)); AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_batched_inverse_lib<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse_cuda"); } else { int64_t info = 0; AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{ apply_single_inverse_lib<scalar_t>(self_working_copy, self_inv_working_copy, info); }); singleCheckErrors(info, "inverse_cuda"); } return self_inv_working_copy; } }} // namespace at::native #endif // USE_CUSOLVER
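Editor's note: _apply_single_inverse_helper above computes an inverse by LU-factorizing the matrix (getrf) and then solving against a pre-filled identity (getrs) through ATen's cuSOLVER wrappers. The sketch below shows the same two-step pattern against the raw cuSOLVER dense API for double precision; it is an illustration of the pattern, not the ATen implementation, and omits error checking for brevity.

#include <cuda_runtime.h>
#include <cusolverDn.h>

// Inverts an n x n column-major matrix. d_A is overwritten by its LU factors;
// d_Ainv must already contain the identity and receives A^{-1}, matching the
// precondition noted in the ATen helper above.
void invert_via_lu(cusolverDnHandle_t handle, double* d_A, double* d_Ainv, int n)
{
    int lwork = 0;
    cusolverDnDgetrf_bufferSize(handle, n, n, d_A, n, &lwork);

    double* d_work = nullptr;
    int* d_ipiv = nullptr;
    int* d_info = nullptr;
    cudaMalloc(&d_work, sizeof(double) * lwork);
    cudaMalloc(&d_ipiv, sizeof(int) * n);
    cudaMalloc(&d_info, sizeof(int));

    // P * A = L * U with partial pivoting.
    cusolverDnDgetrf(handle, n, n, d_A, n, d_work, d_ipiv, d_info);
    // Solve A * X = I, overwriting the identity in d_Ainv with the inverse.
    cusolverDnDgetrs(handle, CUBLAS_OP_N, n, n, d_A, n, d_ipiv, d_Ainv, n, d_info);

    cudaFree(d_work);
    cudaFree(d_ipiv);
    cudaFree(d_info);
}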
385da860c88b17962fc4c32d67d3260c39667aa8.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #include <stdio.h> #include <cmath> #include <fstream> #include <iostream> #include <vector> #include <strstream> #include "../include/DisplayCalculator.cuh" #include "../include/mat4x4.cuh" #include "../include/Mesh.cuh" #include "../include/mainCPU.cuh" #include "../include/defines.h" // includes CUDA #include <hip/hip_runtime.h> #include <helper_timer.h> #include <helper_cuda.h> #include <helper_math.h> using namespace std; namespace CPU { DisplayCalculator displayCalculator(true); char windowTitle[50]; float angleX = 0; float angleY = 0; float deltaTime = 0.033; void checkGLError() { GLenum err; while(( err = glGetError())){ std::cout << err; } } void updateWorldMatrix() { angleX += 0.5f*deltaTime; angleY += 0.7f*deltaTime; displayCalculator.mesh.SetWorldMatrix(getRotationMatrix(angleX, angleY, 0)); } void Display() { StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); glClearColor(1.0,0.0,1.0,1.0); glClear(GL_COLOR_BUFFER_BIT); glRasterPos2d(-1.0, -1.0); checkGLError(); updateWorldMatrix(); displayCalculator.mesh.UpdateMeshVertices(); displayCalculator.GenerateDisplay(); glDrawPixels(displayCalculator.mapWidth, displayCalculator.mapHeight, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, displayCalculator.colorMap); checkGLError(); glFlush(); glutSwapBuffers(); sdkStopTimer(&timer); deltaTime = sdkGetTimerValue(&timer)/1000.0; snprintf(windowTitle, 50, "Raycasting Triangles - %f FPS", 1.0/deltaTime); glutSetWindowTitle(windowTitle); sdkDeleteTimer(&timer); glutPostRedisplay(); } void Reshape(int width, int height) { glViewport(0,0,width, height); displayCalculator.mapWidth = width; displayCalculator.mapHeight = height; if(displayCalculator.colorMap != nullptr) { delete[] displayCalculator.colorMap; } displayCalculator.colorMap = new int[width*height]; displayCalculator.SetCameraFieldOfView(5.0f*width/height, 5.0f); printf("Width = %d, height = %d\n", width, height); glutPostRedisplay(); } void CreateDefaultMesh() { const int pointsLen = 4; const int trianglesLen = 12; float3 points[pointsLen] { make_float3(sqrt(8.f/9.f), 0.0f, -1.f/3.f), make_float3(-sqrt(2.f/9.f), sqrt(2.f/3.f), -1.f/3.f), make_float3(-sqrt(2.f/9.f), -sqrt(2.f/3.f), -1.f/3.f), make_float3(0.0f, 0.0f, 1.0f) }; int triangles[trianglesLen] { 2,0,1, 3,2,1, 2,3,0, 1,0,3 }; displayCalculator.mesh.SetPoints(points,pointsLen); displayCalculator.mesh.SetTriangles(triangles,trianglesLen); } bool LoadMeshFromFile(char * filename) { ifstream f(filename); if(!f.is_open()) { printf("failed to load from file %s\n", filename); return false; } vector<float3> vertices; vector<int> triangles; while(!f.eof()) { char line[128]; f.getline(line, 128); strstream s; s << line; char junk; if(line[0] == 'v') { float3 v; s >> junk >> v.x >> v.y >> v.z; vertices.push_back(v); } if(line[0] == 'f') { int a[3]; s >> junk >> a[0] >> a[1] >> a[2]; triangles.push_back(a[0]-1); triangles.push_back(a[1]-1); triangles.push_back(a[2]-1); } } 
displayCalculator.mesh.SetPoints(vertices.data(),vertices.size()); displayCalculator.mesh.SetTriangles(triangles.data(),triangles.size()); return true; } void SetMesh(int argc, char **argv) { if(argc != 3) { CreateDefaultMesh(); } else { if(!LoadMeshFromFile(argv[2])) { CreateDefaultMesh(); } } printf("triangles=%d, vertices=%d\n", displayCalculator.mesh.trianglesLength, displayCalculator.mesh.pointsLength); displayCalculator.SetCameraPosition(make_float3(0.0f, 0.0f, -5.0f)); displayCalculator.SetCameraFieldOfView(5.0f, 5.0f); displayCalculator.mesh.material.color=OBJECT_COLOR; displayCalculator.mesh.material.diffuse = DIFFUSE; displayCalculator.mesh.material.specular = SPECULAR; displayCalculator.mesh.material.smoothness = SMOOTHNESS; } void SetScene() { #ifdef MULTIPLE_LIGHTS for(int i = 0; i < LIGHT_COUNT; i++ ) { float3 color = make_float3(abs(0.5f-(float)i/LIGHT_COUNT)*2, sin(2*PI*((float)i/LIGHT_COUNT)), cos(2*PI*((float)i/LIGHT_COUNT))); color.y = (color.y+1)/2; color.z = (color.z+1)/2; color/=sqrt(LIGHT_COUNT); displayCalculator.sceneData.lights.push_back( Light(color, make_float3(1.5f*cos(2.0*PI*i/LIGHT_COUNT), 1.5f*sin(2.0*PI*i/LIGHT_COUNT), -3.0f)) ); } #endif #ifdef SINGLE_LIGHT displayCalculator.sceneData.lights.push_back( Light( make_float3(LIGHT_COLOR_X, LIGHT_COLOR_Y, LIGHT_COLOR_Z), make_float3(LIGHT_POS_X, LIGHT_POS_Y,LIGHT_POS_Z)) ); #endif } void StartGL(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(600, 600); glutCreateWindow("Raycasting triangles"); glewInit(); glutDisplayFunc(Display); glutReshapeFunc(Reshape); SetMesh(argc, argv); SetScene(); glutMainLoop(); } int mainCPU(int argc, char **argv) { StartGL(argc, argv); return 0; } }
385da860c88b17962fc4c32d67d3260c39667aa8.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #include <stdio.h> #include <cmath> #include <fstream> #include <iostream> #include <vector> #include <strstream> #include "../include/DisplayCalculator.cuh" #include "../include/mat4x4.cuh" #include "../include/Mesh.cuh" #include "../include/mainCPU.cuh" #include "../include/defines.h" // includes CUDA #include <cuda_runtime.h> #include <helper_timer.h> #include <helper_cuda.h> #include <helper_math.h> using namespace std; namespace CPU { DisplayCalculator displayCalculator(true); char windowTitle[50]; float angleX = 0; float angleY = 0; float deltaTime = 0.033; void checkGLError() { GLenum err; while(( err = glGetError())){ std::cout << err; } } void updateWorldMatrix() { angleX += 0.5f*deltaTime; angleY += 0.7f*deltaTime; displayCalculator.mesh.SetWorldMatrix(getRotationMatrix(angleX, angleY, 0)); } void Display() { StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); glClearColor(1.0,0.0,1.0,1.0); glClear(GL_COLOR_BUFFER_BIT); glRasterPos2d(-1.0, -1.0); checkGLError(); updateWorldMatrix(); displayCalculator.mesh.UpdateMeshVertices(); displayCalculator.GenerateDisplay(); glDrawPixels(displayCalculator.mapWidth, displayCalculator.mapHeight, GL_RGBA, GL_UNSIGNED_INT_8_8_8_8, displayCalculator.colorMap); checkGLError(); glFlush(); glutSwapBuffers(); sdkStopTimer(&timer); deltaTime = sdkGetTimerValue(&timer)/1000.0; snprintf(windowTitle, 50, "Raycasting Triangles - %f FPS", 1.0/deltaTime); glutSetWindowTitle(windowTitle); sdkDeleteTimer(&timer); glutPostRedisplay(); } void Reshape(int width, int height) { glViewport(0,0,width, height); displayCalculator.mapWidth = width; displayCalculator.mapHeight = height; if(displayCalculator.colorMap != nullptr) { delete[] displayCalculator.colorMap; } displayCalculator.colorMap = new int[width*height]; displayCalculator.SetCameraFieldOfView(5.0f*width/height, 5.0f); printf("Width = %d, height = %d\n", width, height); glutPostRedisplay(); } void CreateDefaultMesh() { const int pointsLen = 4; const int trianglesLen = 12; float3 points[pointsLen] { make_float3(sqrt(8.f/9.f), 0.0f, -1.f/3.f), make_float3(-sqrt(2.f/9.f), sqrt(2.f/3.f), -1.f/3.f), make_float3(-sqrt(2.f/9.f), -sqrt(2.f/3.f), -1.f/3.f), make_float3(0.0f, 0.0f, 1.0f) }; int triangles[trianglesLen] { 2,0,1, 3,2,1, 2,3,0, 1,0,3 }; displayCalculator.mesh.SetPoints(points,pointsLen); displayCalculator.mesh.SetTriangles(triangles,trianglesLen); } bool LoadMeshFromFile(char * filename) { ifstream f(filename); if(!f.is_open()) { printf("failed to load from file %s\n", filename); return false; } vector<float3> vertices; vector<int> triangles; while(!f.eof()) { char line[128]; f.getline(line, 128); strstream s; s << line; char junk; if(line[0] == 'v') { float3 v; s >> junk >> v.x >> v.y >> v.z; vertices.push_back(v); } if(line[0] == 'f') { int a[3]; s >> junk >> a[0] >> a[1] >> a[2]; triangles.push_back(a[0]-1); triangles.push_back(a[1]-1); triangles.push_back(a[2]-1); } } 
displayCalculator.mesh.SetPoints(vertices.data(),vertices.size()); displayCalculator.mesh.SetTriangles(triangles.data(),triangles.size()); return true; } void SetMesh(int argc, char **argv) { if(argc != 3) { CreateDefaultMesh(); } else { if(!LoadMeshFromFile(argv[2])) { CreateDefaultMesh(); } } printf("triangles=%d, vertices=%d\n", displayCalculator.mesh.trianglesLength, displayCalculator.mesh.pointsLength); displayCalculator.SetCameraPosition(make_float3(0.0f, 0.0f, -5.0f)); displayCalculator.SetCameraFieldOfView(5.0f, 5.0f); displayCalculator.mesh.material.color=OBJECT_COLOR; displayCalculator.mesh.material.diffuse = DIFFUSE; displayCalculator.mesh.material.specular = SPECULAR; displayCalculator.mesh.material.smoothness = SMOOTHNESS; } void SetScene() { #ifdef MULTIPLE_LIGHTS for(int i = 0; i < LIGHT_COUNT; i++ ) { float3 color = make_float3(abs(0.5f-(float)i/LIGHT_COUNT)*2, sin(2*PI*((float)i/LIGHT_COUNT)), cos(2*PI*((float)i/LIGHT_COUNT))); color.y = (color.y+1)/2; color.z = (color.z+1)/2; color/=sqrt(LIGHT_COUNT); displayCalculator.sceneData.lights.push_back( Light(color, make_float3(1.5f*cos(2.0*PI*i/LIGHT_COUNT), 1.5f*sin(2.0*PI*i/LIGHT_COUNT), -3.0f)) ); } #endif #ifdef SINGLE_LIGHT displayCalculator.sceneData.lights.push_back( Light( make_float3(LIGHT_COLOR_X, LIGHT_COLOR_Y, LIGHT_COLOR_Z), make_float3(LIGHT_POS_X, LIGHT_POS_Y,LIGHT_POS_Z)) ); #endif } void StartGL(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(600, 600); glutCreateWindow("Raycasting triangles"); glewInit(); glutDisplayFunc(Display); glutReshapeFunc(Reshape); SetMesh(argc, argv); SetScene(); glutMainLoop(); } int mainCPU(int argc, char **argv) { StartGL(argc, argv); return 0; } }
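The OBJ loader above drives its read loop with f.eof() and the long-deprecated <strstream> header. A minimal sketch of the same "v"/"f" line parsing with std::getline and std::istringstream follows; the loadObj name and the restriction to plain "v x y z" / "f a b c" records are assumptions, not the project's API.

#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>   // float3, make_float3

// Minimal Wavefront OBJ reader for "v x y z" and "f a b c" lines only
// (no texture/normal indices, no negative indices). Returns false if the
// file cannot be opened.
bool loadObj(const char* filename,
             std::vector<float3>& vertices,
             std::vector<int>& triangles)
{
    std::ifstream f(filename);
    if (!f.is_open()) return false;

    std::string line;
    while (std::getline(f, line)) {          // loop on the read, not on eof()
        std::istringstream s(line);
        std::string tag;
        s >> tag;
        if (tag == "v") {
            float x, y, z;
            if (s >> x >> y >> z)
                vertices.push_back(make_float3(x, y, z));
        } else if (tag == "f") {
            int a, b, c;
            if (s >> a >> b >> c) {          // OBJ indices are 1-based
                triangles.push_back(a - 1);
                triangles.push_back(b - 1);
                triangles.push_back(c - 1);
            }
        }
    }
    return true;
}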
6de769e0de57316fb9b04f3c6f8607ef16f9db20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void memSetKernel( int nBatch,int rbs,int nDegree,int nDScale, int dbStopIdx,int dBaseScale, float regularize, float *data,float *dataRev, // array of data and reverse data float *R, // array of range // arrays pointer float *DA, float *RA, float *AA, float *BA, float *IA, float *CA, float *EA, float *SA // pointer of array of pointer to pointer of array in arrays, nevermind i just stun you. // p(i) = data(i + size(data)) ,float **DP, float **RP, float **AP, float **BP, float **IP, float **CP, float **EP, float **SP ) { int taskIdx = blockIdx.x * blockDim.x + threadIdx.x; if (taskIdx < nBatch) { // initialize domain arrays int nCoeff = ((nDegree - 1) * nDScale + 1); int dpOffset = (taskIdx * rbs * nCoeff); for(int i = 0; i < rbs; i++){ DA[dpOffset + i] = 1.0f; // power 0 } for(int i = 0; i < rbs; i++){ DA[dpOffset + i + rbs] = 0.0f; // power 1 } int dStartIdx = taskIdx % (nBatch/2); for(int ds = 1; ds <= nDScale; ds++){ // vec sumation int mapDStart = dStartIdx + (nDScale - ds) * (rbs/2); int dScale = dBaseScale * ds; // base_scale * current_scale for(int i = 0; i < dScale; i++){ for(int j = 0; j < rbs; j++){ if(taskIdx < (nBatch/2)){ DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j] + data[mapDStart + j*dScale + i]; }else{ // gen reverse domain DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j] + dataRev[mapDStart + j*dScale+ i]; } } } // vec scalig for(int j = 0; j < rbs; j++){ DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j]/dScale; } } // calculate next degree for(int j = 2; j < nDegree; j++){ int degreePad = (j * rbs * nDScale ); for(int i = 0; i < rbs * nDScale; i++){ DA[i + dpOffset + rbs + degreePad] = DA[j + dpOffset + rbs] * DA[j + dpOffset + rbs + degreePad - rbs] ; // power n>=2 } } // initialize range and error arrays int rpOffset = (taskIdx * rbs); for(int j = 0; j < rbs; j++){ RA[rpOffset + j] = R[j]; EA[rpOffset + j] = R[j]; } // initialize covariance matrix with regularization int apOffset = (taskIdx * nCoeff * nCoeff); for(int i = 0; i < nCoeff * nCoeff; i+= nCoeff+1){ AA[apOffset + i] = regularize * regularize; // power 0 } // pointing section DP[taskIdx] = (DA + taskIdx * rbs * nCoeff); RP[taskIdx] = (RA + taskIdx * rbs); AP[taskIdx] = (AA + taskIdx * nCoeff * nCoeff); BP[taskIdx] = (BA + taskIdx * nCoeff); IP[taskIdx] = (IA + taskIdx * nCoeff * nCoeff); CP[taskIdx] = (CA + taskIdx * nCoeff); EP[taskIdx] = (EA + taskIdx * rbs); SP[taskIdx] = (SA + taskIdx); } }
6de769e0de57316fb9b04f3c6f8607ef16f9db20.cu
extern "C" __global__ void memSetKernel( int nBatch,int rbs,int nDegree,int nDScale, int dbStopIdx,int dBaseScale, float regularize, float *data,float *dataRev, // array of data and reverse data float *R, // array of range // arrays pointer float *DA, float *RA, float *AA, float *BA, float *IA, float *CA, float *EA, float *SA // pointer of array of pointer to pointer of array in arrays, nevermind i just stun you. // p(i) = data(i + size(data)) ,float **DP, float **RP, float **AP, float **BP, float **IP, float **CP, float **EP, float **SP ) { int taskIdx = blockIdx.x * blockDim.x + threadIdx.x; if (taskIdx < nBatch) { // initialize domain arrays int nCoeff = ((nDegree - 1) * nDScale + 1); int dpOffset = (taskIdx * rbs * nCoeff); for(int i = 0; i < rbs; i++){ DA[dpOffset + i] = 1.0f; // power 0 } for(int i = 0; i < rbs; i++){ DA[dpOffset + i + rbs] = 0.0f; // power 1 } int dStartIdx = taskIdx % (nBatch/2); for(int ds = 1; ds <= nDScale; ds++){ // vec sumation int mapDStart = dStartIdx + (nDScale - ds) * (rbs/2); int dScale = dBaseScale * ds; // base_scale * current_scale for(int i = 0; i < dScale; i++){ for(int j = 0; j < rbs; j++){ if(taskIdx < (nBatch/2)){ DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j] + data[mapDStart + j*dScale + i]; }else{ // gen reverse domain DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j] + dataRev[mapDStart + j*dScale+ i]; } } } // vec scalig for(int j = 0; j < rbs; j++){ DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j]/dScale; } } // calculate next degree for(int j = 2; j < nDegree; j++){ int degreePad = (j * rbs * nDScale ); for(int i = 0; i < rbs * nDScale; i++){ DA[i + dpOffset + rbs + degreePad] = DA[j + dpOffset + rbs] * DA[j + dpOffset + rbs + degreePad - rbs] ; // power n>=2 } } // initialize range and error arrays int rpOffset = (taskIdx * rbs); for(int j = 0; j < rbs; j++){ RA[rpOffset + j] = R[j]; EA[rpOffset + j] = R[j]; } // initialize covariance matrix with regularization int apOffset = (taskIdx * nCoeff * nCoeff); for(int i = 0; i < nCoeff * nCoeff; i+= nCoeff+1){ AA[apOffset + i] = regularize * regularize; // power 0 } // pointing section DP[taskIdx] = (DA + taskIdx * rbs * nCoeff); RP[taskIdx] = (RA + taskIdx * rbs); AP[taskIdx] = (AA + taskIdx * nCoeff * nCoeff); BP[taskIdx] = (BA + taskIdx * nCoeff); IP[taskIdx] = (IA + taskIdx * nCoeff * nCoeff); CP[taskIdx] = (CA + taskIdx * nCoeff); EP[taskIdx] = (EA + taskIdx * rbs); SP[taskIdx] = (SA + taskIdx); } }
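memSetKernel guards each thread with taskIdx < nBatch, so the host only needs to launch at least nBatch threads. A minimal host-side launch sketch with the usual ceiling-division grid sizing follows; the launchMemSet wrapper, the 256-thread block size, and the optional stream argument are assumptions, and the kernel definition from the file above is presumed visible in the same translation unit.

#include <cuda_runtime.h>

// Round the grid size up so every taskIdx < nBatch is covered; the in-kernel
// `if (taskIdx < nBatch)` guard handles the padding threads.
static inline int ceilDiv(int a, int b) { return (a + b - 1) / b; }

void launchMemSet(int nBatch, int rbs, int nDegree, int nDScale,
                  int dbStopIdx, int dBaseScale, float regularize,
                  float* data, float* dataRev, float* R,
                  float* DA, float* RA, float* AA, float* BA,
                  float* IA, float* CA, float* EA, float* SA,
                  float** DP, float** RP, float** AP, float** BP,
                  float** IP, float** CP, float** EP, float** SP,
                  cudaStream_t stream = 0)
{
    const int block = 256;                       // assumed block size
    const int grid  = ceilDiv(nBatch, block);
    memSetKernel<<<grid, block, 0, stream>>>(
        nBatch, rbs, nDegree, nDScale, dbStopIdx, dBaseScale, regularize,
        data, dataRev, R,
        DA, RA, AA, BA, IA, CA, EA, SA,
        DP, RP, AP, BP, IP, CP, EP, SP);
}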
ac9b14e2c67b848988dcf6b7ba68121166af8802.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file knn_app.cu * * @brief Simple Gunrock Application */ // Gunrock api #include <gunrock/gunrock.h> // Test utils #include <gunrock/util/test_utils.cuh> // Graphio include #include <gunrock/graphio/graphio.cuh> // App and test base includes #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> // JSON includes #include <gunrock/util/info_rapidjson.cuh> // KNN includes #include <gunrock/app/knn/knn_enactor.cuh> #include <gunrock/app/knn/knn_test.cuh> namespace gunrock { namespace app { namespace knn { hipError_t UseParameters(util::Parameters &parameters) { hipError_t retval = hipSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<std::string>( "tag", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, "", "tag info for json string", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "k", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 10, "Numbers of k neighbors.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "x", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0, "Index of reference point.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "y", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0, "Index of reference point.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<float>( "cpu-elapsed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0.0f, "CPU implementation, elapsed time (ms) for JSON.", __FILE__, __LINE__)); return retval; } /** * @brief Run knn tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph ... 
* @param[in] target where to perform the app * \return hipError_t error message(s), if any */ template <typename GraphT> hipError_t RunTests(util::Parameters &parameters, GraphT &graph, typename GraphT::SizeT k, typename GraphT::SizeT *h_knns, typename GraphT::SizeT *ref_knns, util::Location target) { hipError_t retval = hipSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; // CLI parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); util::Info info("knn", parameters, graph); VertexT point_x = parameters.Get<int>("x"); VertexT point_y = parameters.Get<int>("y"); util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; GUARD_CU(problem.Init(graph, k, target)); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); for (int run_num = 0; run_num < num_runs; ++run_num) { GUARD_CU(problem.Reset(point_x, point_y, k, target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(graph.nodes, k, h_knns)); SizeT num_errors = Validate_Results(parameters, graph, h_knns, ref_knns, false); } } cpu_timer.Start(); GUARD_CU(problem.Extract(graph.nodes, k, h_knns)); if (validation == "last") { SizeT num_errors = Validate_Results(parameters, graph, h_knns, ref_knns, false); } // compute running statistics // Change NULL to problem specific per-vertex visited marker, e.g. // h_distances info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace knn } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
ac9b14e2c67b848988dcf6b7ba68121166af8802.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file knn_app.cu * * @brief Simple Gunrock Application */ // Gunrock api #include <gunrock/gunrock.h> // Test utils #include <gunrock/util/test_utils.cuh> // Graphio include #include <gunrock/graphio/graphio.cuh> // App and test base includes #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> // JSON includes #include <gunrock/util/info_rapidjson.cuh> // KNN includes #include <gunrock/app/knn/knn_enactor.cuh> #include <gunrock/app/knn/knn_test.cuh> namespace gunrock { namespace app { namespace knn { cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<std::string>( "tag", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, "", "tag info for json string", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "k", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 10, "Numbers of k neighbors.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "x", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0, "Index of reference point.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "y", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0, "Index of reference point.", __FILE__, __LINE__)); GUARD_CU(parameters.Use<float>( "cpu-elapsed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0.0f, "CPU implementation, elapsed time (ms) for JSON.", __FILE__, __LINE__)); return retval; } /** * @brief Run knn tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph ... 
* @param[in] target where to perform the app * \return cudaError_t error message(s), if any */ template <typename GraphT> cudaError_t RunTests(util::Parameters &parameters, GraphT &graph, typename GraphT::SizeT k, typename GraphT::SizeT *h_knns, typename GraphT::SizeT *ref_knns, util::Location target) { cudaError_t retval = cudaSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; // CLI parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); util::Info info("knn", parameters, graph); VertexT point_x = parameters.Get<int>("x"); VertexT point_y = parameters.Get<int>("y"); util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; GUARD_CU(problem.Init(graph, k, target)); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); for (int run_num = 0; run_num < num_runs; ++run_num) { GUARD_CU(problem.Reset(point_x, point_y, k, target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(graph.nodes, k, h_knns)); SizeT num_errors = Validate_Results(parameters, graph, h_knns, ref_knns, false); } } cpu_timer.Start(); GUARD_CU(problem.Extract(graph.nodes, k, h_knns)); if (validation == "last") { SizeT num_errors = Validate_Results(parameters, graph, h_knns, ref_knns, false); } // compute running statistics // Change NULL to problem specific per-vertex visited marker, e.g. // h_distances info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace knn } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
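RunTests threads every CUDA and Gunrock call through GUARD_CU so that failures propagate out as the cudaError_t return value. The sketch below shows a generic error-guard of that shape; it is not Gunrock's actual GUARD_CU definition, just the common pattern such macros follow.

#include <cstdio>
#include <cuda_runtime.h>

// Evaluate the expression; if it reports an error, print it and return it
// from the enclosing cudaError_t-returning function.
#define CHECK_CU(expr)                                                   \
  do {                                                                   \
    cudaError_t _err = (expr);                                           \
    if (_err != cudaSuccess) {                                           \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",                   \
                   cudaGetErrorString(_err), __FILE__, __LINE__);        \
      return _err;                                                       \
    }                                                                    \
  } while (0)

// Example use: every call site bubbles the error up, mirroring how RunTests
// above threads `retval` through its guarded calls.
cudaError_t allocateAndZero(float** d_ptr, size_t n) {
  CHECK_CU(cudaMalloc(d_ptr, n * sizeof(float)));
  CHECK_CU(cudaMemset(*d_ptr, 0, n * sizeof(float)));
  return cudaSuccess;
}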
bc3ecfcdde8a8d5ac236896a8d7dd1cf00270e7b.hip
// !!! This is a file automatically generated by hipify!!! #define GRB_USE_CUDA #define private public #include <iostream> #include <algorithm> #include <string> #include <cstdio> #include <cstdlib> #include <boost/program_options.hpp> #include "graphblas/graphblas.hpp" #include "graphblas/backend/cuda/util.hpp" // GpuTimer #include "test/test.hpp" int main( int argc, char** argv ) { std::vector<graphblas::Index> row_indices; std::vector<graphblas::Index> col_indices; std::vector<float> values; graphblas::Index nrows, ncols, nvals; // Parse arguments bool debug; bool transpose; int directed; int niter; po::variables_map vm; // Read in sparse matrix if (argc < 2) { fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]); exit(1); } else { parseArgs(argc, argv, &vm); debug = vm["debug" ].as<bool>(); transpose = vm["transpose"].as<bool>(); directed = vm["directed" ].as<int>(); niter = vm["niter" ].as<int>(); readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols, &nvals, directed, debug); } // Matrix A graphblas::Matrix<float> a(nrows, ncols); CHECK( a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL) ); CHECK( a.nrows(&nrows) ); CHECK( a.ncols(&ncols) ); CHECK( a.nvals(&nvals) ); if( debug ) CHECK( a.print() ); // Vector x graphblas::Vector<float> x(nrows); std::vector<graphblas::Index> x_ind = {0}; std::vector<float> x_val = {1.f}; CHECK( x.build(&x_ind, &x_val, 1, GrB_NULL) ); CHECK( x.size(&nrows) ); if( debug ) CHECK( x.print() ); // Vector y graphblas::Vector<float> y(nrows); // Vector mask graphblas::Vector<float> m(nrows); CHECK( m.fill(1.f) ); CHECK( m.setElement(-1.f, 0) ); CHECK( m.size(&nrows) ); // Descriptor graphblas::Descriptor desc; CHECK( desc.loadArgs(vm) ); CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) ); CHECK( desc.set(graphblas::GrB_MXVMODE, graphblas::GrB_PULLONLY) ); // Warmup CpuTimer warmup; warmup.Start(); graphblas::vxm<float, float, float, float>(&y, GrB_NULL, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &m, &a, &desc); warmup.Stop(); std::vector<float> value(nrows,-1.f); std::vector<float> my_time; graphblas::backend::GpuTimer cpu_vxm; //hipProfilerStart(); cpu_vxm.Start(); graphblas::vxm<float, float, float, float>(&y, GrB_NULL, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &m, &a, &desc); cpu_vxm.Stop(); my_time.push_back(cpu_vxm.ElapsedMillis()); for( int i=1000; i<nrows; i+=1000 ) { //m.clear(); m.build(&value, i); cpu_vxm.Start(); graphblas::vxm<float, float, float, float>( &y, GrB_NULL, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &m, &a, &desc ); cpu_vxm.Stop(); my_time.push_back(cpu_vxm.ElapsedMillis()); } //hipProfilerStop(); float flop = 0; std::cout << "warmup, " << warmup.ElapsedMillis() << std::endl; for( int i=0; i<my_time.size(); i++ ) std::cout << (i)*1000 << ", " << my_time[i] << std::endl; if( debug ) y.print(); return 0; }
bc3ecfcdde8a8d5ac236896a8d7dd1cf00270e7b.cu
#define GRB_USE_CUDA #define private public #include <iostream> #include <algorithm> #include <string> #include <cstdio> #include <cstdlib> #include <boost/program_options.hpp> #include "graphblas/graphblas.hpp" #include "graphblas/backend/cuda/util.hpp" // GpuTimer #include "test/test.hpp" int main( int argc, char** argv ) { std::vector<graphblas::Index> row_indices; std::vector<graphblas::Index> col_indices; std::vector<float> values; graphblas::Index nrows, ncols, nvals; // Parse arguments bool debug; bool transpose; int directed; int niter; po::variables_map vm; // Read in sparse matrix if (argc < 2) { fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]); exit(1); } else { parseArgs(argc, argv, &vm); debug = vm["debug" ].as<bool>(); transpose = vm["transpose"].as<bool>(); directed = vm["directed" ].as<int>(); niter = vm["niter" ].as<int>(); readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols, &nvals, directed, debug); } // Matrix A graphblas::Matrix<float> a(nrows, ncols); CHECK( a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL) ); CHECK( a.nrows(&nrows) ); CHECK( a.ncols(&ncols) ); CHECK( a.nvals(&nvals) ); if( debug ) CHECK( a.print() ); // Vector x graphblas::Vector<float> x(nrows); std::vector<graphblas::Index> x_ind = {0}; std::vector<float> x_val = {1.f}; CHECK( x.build(&x_ind, &x_val, 1, GrB_NULL) ); CHECK( x.size(&nrows) ); if( debug ) CHECK( x.print() ); // Vector y graphblas::Vector<float> y(nrows); // Vector mask graphblas::Vector<float> m(nrows); CHECK( m.fill(1.f) ); CHECK( m.setElement(-1.f, 0) ); CHECK( m.size(&nrows) ); // Descriptor graphblas::Descriptor desc; CHECK( desc.loadArgs(vm) ); CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) ); CHECK( desc.set(graphblas::GrB_MXVMODE, graphblas::GrB_PULLONLY) ); // Warmup CpuTimer warmup; warmup.Start(); graphblas::vxm<float, float, float, float>(&y, GrB_NULL, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &m, &a, &desc); warmup.Stop(); std::vector<float> value(nrows,-1.f); std::vector<float> my_time; graphblas::backend::GpuTimer cpu_vxm; //cudaProfilerStart(); cpu_vxm.Start(); graphblas::vxm<float, float, float, float>(&y, GrB_NULL, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &m, &a, &desc); cpu_vxm.Stop(); my_time.push_back(cpu_vxm.ElapsedMillis()); for( int i=1000; i<nrows; i+=1000 ) { //m.clear(); m.build(&value, i); cpu_vxm.Start(); graphblas::vxm<float, float, float, float>( &y, GrB_NULL, GrB_NULL, graphblas::PlusMultipliesSemiring<float>(), &m, &a, &desc ); cpu_vxm.Stop(); my_time.push_back(cpu_vxm.ElapsedMillis()); } //cudaProfilerStop(); float flop = 0; std::cout << "warmup, " << warmup.ElapsedMillis() << std::endl; for( int i=0; i<my_time.size(); i++ ) std::cout << (i)*1000 << ", " << my_time[i] << std::endl; if( debug ) y.print(); return 0; }
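The benchmark warms up once and then times each vxm call with a GpuTimer from the graphblas backend; that class's implementation is not shown here. The sketch below is the generic cudaEvent timing idiom such helpers usually wrap.

#include <cuda_runtime.h>

// cudaEvent-based elapsed-time measurement: record a start and stop event on
// stream 0, synchronize on the stop event, then query the elapsed time.
struct EventTimer {
  cudaEvent_t start_, stop_;
  EventTimer()  { cudaEventCreate(&start_); cudaEventCreate(&stop_); }
  ~EventTimer() { cudaEventDestroy(start_); cudaEventDestroy(stop_); }

  void Start() { cudaEventRecord(start_, 0); }
  void Stop()  { cudaEventRecord(stop_, 0); }

  float ElapsedMillis() {
    // Synchronizing on the stop event makes the measurement cover all work
    // submitted between Start() and Stop() on stream 0.
    cudaEventSynchronize(stop_);
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start_, stop_);
    return ms;
  }
};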
39f3d35f211a83a485faae4ae9ceef450738259f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<hiprand/hiprand_kernel.h> #include<hiprand/hiprand.h> #include<sys/time.h> unsigned int NUM_PARTICLES = 1000000; unsigned int NUM_ITERATIONS = 10; unsigned int BLOCK_SIZE = 192; //unsigned int GRID_SIZE = ((NUM_PARTICLES/BLOCK_SIZE) + 1); unsigned int NUM_STREAMS = 10; typedef struct { float3 posId; }position; typedef struct { float3 velId; }velocity; typedef struct { position pos; velocity vel; }Particle; void fill_data(Particle *p) { for(int i=0; i< NUM_PARTICLES; i++) { p[i].pos.posId.x = 10*((float)rand()/RAND_MAX); p[i].pos.posId.y = 10*((float)rand()/RAND_MAX); p[i].pos.posId.z = 10*((float)rand()/RAND_MAX); p[i].vel.velId.x = 100*((float)rand()/RAND_MAX); p[i].vel.velId.y = 100*((float)rand()/RAND_MAX); p[i].vel.velId.z = 100*((float)rand()/RAND_MAX); } } __global__ void particle_kernel_per_iteration(Particle *p, int offset, int streamSize) { int i = (blockIdx.x*blockDim.x)+threadIdx.x; if(i < streamSize) { p[offset + i].pos.posId.x += p[offset + i].vel.velId.x; p[offset + i].pos.posId.y += p[offset + i].vel.velId.y; p[offset + i].pos.posId.z += p[offset + i].vel.velId.z; } __syncthreads(); } void update_velocity_position_in_gpu(Particle *p) { struct timeval start_time; struct timeval stop_time; Particle *gPar = NULL; hipMalloc(&gPar, NUM_PARTICLES * sizeof(Particle)); unsigned long streamSize = NUM_PARTICLES/NUM_STREAMS; unsigned long streamBytes = streamSize * sizeof(Particle); hipStream_t stream[NUM_STREAMS]; for(int i=0; i<NUM_STREAMS; i++) hipStreamCreate(&stream[i]); //Start time gettimeofday(&start_time, NULL); #ifdef TYPE1 for(int i=0; i<NUM_ITERATIONS; i++) { for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; // Copy Data to GPU Memory Asynchronously hipMemcpyAsync(&gPar[offset], &p[offset], streamBytes, hipMemcpyHostToDevice, stream[s]); //Launch kernel hipLaunchKernelGGL(( particle_kernel_per_iteration), dim3(((streamSize/BLOCK_SIZE) + 1)), dim3(BLOCK_SIZE), 0, stream[s], gPar, offset, streamSize); //hipDeviceSynchronize(); //Copy Data back to Host hipMemcpyAsync(&p[offset], &gPar[offset], streamBytes, hipMemcpyDeviceToHost, stream[s]); } hipDeviceSynchronize(); //Update Velocity in Host before copying data to GPU Memory for(int j=0; j<NUM_PARTICLES;j++) { p[j].vel.velId.x = 100*((float)rand()/RAND_MAX); p[j].vel.velId.y = 100*((float)rand()/RAND_MAX); p[j].vel.velId.z = 100*((float)rand()/RAND_MAX); } } #else for(int i=0; i<NUM_ITERATIONS; i++) { for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; // Copy Data to GPU Memory Asynchronously hipMemcpyAsync(&gPar[offset], &p[offset], streamBytes, hipMemcpyHostToDevice, stream[s]); } for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; //Launch kernel hipLaunchKernelGGL(( particle_kernel_per_iteration), dim3(((streamSize/BLOCK_SIZE) + 1)), dim3(BLOCK_SIZE), 0, stream[s], gPar, offset, streamSize); } for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; //Copy Data back to Host hipMemcpyAsync(&p[offset], &gPar[offset], streamBytes, hipMemcpyDeviceToHost, stream[s]); } hipDeviceSynchronize(); //Update Velocity in Host before copying data to GPU Memory for(int j=0; j<NUM_PARTICLES;j++) { p[j].vel.velId.x = 100*((float)rand()/RAND_MAX); p[j].vel.velId.y = 100*((float)rand()/RAND_MAX); p[j].vel.velId.z = 100*((float)rand()/RAND_MAX); } } #endif //Stop time gettimeofday(&stop_time, NULL); for(int i=0; i<NUM_STREAMS; 
i++) hipStreamDestroy(stream[i]); hipFree(gPar); printf("Total time of Execution in GPU: %ld msec\n\n", ((stop_time.tv_sec*1000000 + stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec))/1000); } int main(int argc, char *argv[]) { if(argc != 3) { printf("No. of arguments to be passed should be 2 i.e. 1st as NUM_PARTICLES and 2nd as NUM_STREAMS\n"); exit(1); } NUM_PARTICLES = atoi(argv[1]); NUM_STREAMS = atoi(argv[2]); Particle *par = NULL; hipHostMalloc(&par, NUM_PARTICLES*sizeof(Particle)); fill_data(par); update_velocity_position_in_gpu(par); hipHostFree(par); return 0; }
39f3d35f211a83a485faae4ae9ceef450738259f.cu
#include<stdio.h> #include<stdlib.h> #include<curand_kernel.h> #include<curand.h> #include<sys/time.h> unsigned int NUM_PARTICLES = 1000000; unsigned int NUM_ITERATIONS = 10; unsigned int BLOCK_SIZE = 192; //unsigned int GRID_SIZE = ((NUM_PARTICLES/BLOCK_SIZE) + 1); unsigned int NUM_STREAMS = 10; typedef struct { float3 posId; }position; typedef struct { float3 velId; }velocity; typedef struct { position pos; velocity vel; }Particle; void fill_data(Particle *p) { for(int i=0; i< NUM_PARTICLES; i++) { p[i].pos.posId.x = 10*((float)rand()/RAND_MAX); p[i].pos.posId.y = 10*((float)rand()/RAND_MAX); p[i].pos.posId.z = 10*((float)rand()/RAND_MAX); p[i].vel.velId.x = 100*((float)rand()/RAND_MAX); p[i].vel.velId.y = 100*((float)rand()/RAND_MAX); p[i].vel.velId.z = 100*((float)rand()/RAND_MAX); } } __global__ void particle_kernel_per_iteration(Particle *p, int offset, int streamSize) { int i = (blockIdx.x*blockDim.x)+threadIdx.x; if(i < streamSize) { p[offset + i].pos.posId.x += p[offset + i].vel.velId.x; p[offset + i].pos.posId.y += p[offset + i].vel.velId.y; p[offset + i].pos.posId.z += p[offset + i].vel.velId.z; } __syncthreads(); } void update_velocity_position_in_gpu(Particle *p) { struct timeval start_time; struct timeval stop_time; Particle *gPar = NULL; cudaMalloc(&gPar, NUM_PARTICLES * sizeof(Particle)); unsigned long streamSize = NUM_PARTICLES/NUM_STREAMS; unsigned long streamBytes = streamSize * sizeof(Particle); cudaStream_t stream[NUM_STREAMS]; for(int i=0; i<NUM_STREAMS; i++) cudaStreamCreate(&stream[i]); //Start time gettimeofday(&start_time, NULL); #ifdef TYPE1 for(int i=0; i<NUM_ITERATIONS; i++) { for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; // Copy Data to GPU Memory Asynchronously cudaMemcpyAsync(&gPar[offset], &p[offset], streamBytes, cudaMemcpyHostToDevice, stream[s]); //Launch kernel particle_kernel_per_iteration<<<((streamSize/BLOCK_SIZE) + 1), BLOCK_SIZE, 0, stream[s]>>>(gPar, offset, streamSize); //cudaDeviceSynchronize(); //Copy Data back to Host cudaMemcpyAsync(&p[offset], &gPar[offset], streamBytes, cudaMemcpyDeviceToHost, stream[s]); } cudaDeviceSynchronize(); //Update Velocity in Host before copying data to GPU Memory for(int j=0; j<NUM_PARTICLES;j++) { p[j].vel.velId.x = 100*((float)rand()/RAND_MAX); p[j].vel.velId.y = 100*((float)rand()/RAND_MAX); p[j].vel.velId.z = 100*((float)rand()/RAND_MAX); } } #else for(int i=0; i<NUM_ITERATIONS; i++) { for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; // Copy Data to GPU Memory Asynchronously cudaMemcpyAsync(&gPar[offset], &p[offset], streamBytes, cudaMemcpyHostToDevice, stream[s]); } for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; //Launch kernel particle_kernel_per_iteration<<<((streamSize/BLOCK_SIZE) + 1), BLOCK_SIZE, 0, stream[s]>>>(gPar, offset, streamSize); } for(int s=0; s<NUM_STREAMS; s++) { unsigned long offset = s * streamSize; //Copy Data back to Host cudaMemcpyAsync(&p[offset], &gPar[offset], streamBytes, cudaMemcpyDeviceToHost, stream[s]); } cudaDeviceSynchronize(); //Update Velocity in Host before copying data to GPU Memory for(int j=0; j<NUM_PARTICLES;j++) { p[j].vel.velId.x = 100*((float)rand()/RAND_MAX); p[j].vel.velId.y = 100*((float)rand()/RAND_MAX); p[j].vel.velId.z = 100*((float)rand()/RAND_MAX); } } #endif //Stop time gettimeofday(&stop_time, NULL); for(int i=0; i<NUM_STREAMS; i++) cudaStreamDestroy(stream[i]); cudaFree(gPar); printf("Total time of Execution in GPU: %ld msec\n\n", ((stop_time.tv_sec*1000000 + 
stop_time.tv_usec)-(start_time.tv_sec*1000000 + start_time.tv_usec))/1000); } int main(int argc, char *argv[]) { if(argc != 3) { printf("No. of arguments to be passed should be 2 i.e. 1st as NUM_PARTICLES and 2nd as NUM_STREAMS\n"); exit(1); } NUM_PARTICLES = atoi(argv[1]); NUM_STREAMS = atoi(argv[2]); Particle *par = NULL; cudaMallocHost(&par, NUM_PARTICLES*sizeof(Particle)); fill_data(par); update_velocity_position_in_gpu(par); cudaFreeHost(par); return 0; }
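Both issue orders above overlap cudaMemcpyAsync transfers with kernels across streams, which only works when the host-side particle buffer is pinned (allocated with cudaMallocHost / hipHostMalloc). A minimal single-stream sketch of that requirement follows; the sizes and names are assumptions and error checks are omitted.

#include <cuda_runtime.h>

// cudaMemcpyAsync only overlaps with kernels and other copies when the host
// buffer is pinned (page-locked); with pageable memory the transfer is staged
// through an internal buffer and behaves synchronously.
void pinnedCopyDemo(size_t n) {
  float *h_buf = nullptr, *d_buf = nullptr;

  cudaMallocHost(&h_buf, n * sizeof(float));   // pinned host allocation
  cudaMalloc(&d_buf, n * sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  for (size_t i = 0; i < n; ++i) h_buf[i] = 1.0f;

  // Enqueue the copy on the stream; the host thread is free to queue kernels
  // or more copies until the explicit synchronization below.
  cudaMemcpyAsync(d_buf, h_buf, n * sizeof(float),
                  cudaMemcpyHostToDevice, stream);
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(d_buf);
  cudaFreeHost(h_buf);    // pinned memory is released with cudaFreeHost
}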
6cc7c8090f9cac2eb3b14a1c78919bf44fdc808e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> // 80 x 80 //const int N = 80 * 80; // 160 x 160 //const int N = 160 * 160; // 320 x 320 //const int N = 320 * 320; // 640 x 640 //const int N = 640 * 640; // 1k x 4 //const int N = 1000; // 10k x 4 //const int N = 1000 * 10; // 100k x 4 //const int N = 1000 * 100; // 1M x 4 //const int N = 1000 * 1000; // 10M x 4 //const int N = 1000 * 1000 * 10; const int N = 1 << 20; //const int N = 1 << 10; #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, const int offset, float *c_d) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); if(tid < N) { c_d[tid + offset] = a_d[tid + offset] + b_d[tid + offset]; } } int main( int argc, char **argv) { int devid = 0 ; int num_streams = 8; if(argc >= 2) num_streams = atoi(argv[1]); if(argc >= 3) devid = atoi(argv[2]); hipSetDevice(devid); printf("\nrunning %d cuda streams on device %d\n", num_streams, devid); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, devid); printf("Device Number: %d\n", devid); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); // allocate streams hipStream_t *streams = (hipStream_t *) malloc(num_streams * sizeof(hipStream_t)); // init for (int i = 0; i < num_streams; i++) { checkCudaErrors(hipStreamCreate(&(streams[i]))); } //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; //float *a_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *b_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *c_h = (float*) malloc ( N * num_streams * FLTSIZE); float *a_h = NULL; checkCudaErrors(hipHostMalloc((void **)&a_h, N * num_streams * FLTSIZE)); float *b_h = NULL; checkCudaErrors(hipHostMalloc((void **)&b_h, N * num_streams * FLTSIZE)); float *c_h = NULL; checkCudaErrors(hipHostMalloc((void **)&c_h, N * num_streams * FLTSIZE)); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; hipMalloc((void**)&a_d, N * num_streams * FLTSIZE); hipMalloc((void**)&b_d, N * num_streams * FLTSIZE); hipMalloc((void**)&c_d, N * num_streams * FLTSIZE); // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // create cuda event handles hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); hipEventRecord(start,0); // copy data to deivce for (int i = 0; i < num_streams; i++) { int offset = i * N; hipMemcpyAsync(&a_d[offset], &a_h[offset], databytes, hipMemcpyHostToDevice, streams[i]); hipMemcpyAsync(&b_d[offset], &b_h[offset], databytes, hipMemcpyHostToDevice, streams[i]); } // launch one worker kernel per stream for (int i = 0; i < num_streams; i++) { int offset = i * 
N; hipLaunchKernelGGL(( kernel_vectorAdd) , dim3(blocks), dim3(threads), 0, streams[i] , a_d, b_d, N, offset, c_d); } // copy data back to host for (int i = 0; i < num_streams; i++) { int offset = i * N; hipMemcpyAsync(&c_h[offset], &c_d[offset], databytes, hipMemcpyDeviceToHost, streams[i]); } // required for async copy //hipDeviceSynchronize(); //hipEventSynchronize(stop); hipEventRecord(stop, 0); // have CPU do some work while waiting for stage 1 to finish unsigned long int counter=0; while (hipEventQuery(stop) == hipErrorNotReady) { counter++; } float gpuTime_ms= 0; hipEventElapsedTime(&gpuTime_ms, start, stop); printf("runtime (ms) : %f\n", gpuTime_ms); /* // check data bool success = 1; for(int i=0; i< N * num_streams; i++) { if (abs(c_h[i] - 3.3f) > 1e-6) { fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); success = 0; break; } } if(success) { printf("\nSuccess! Exit.\n"); } */ //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// for (int i = 0; i < num_streams; i++) { checkCudaErrors(hipStreamDestroy(streams[i])); } checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); hipHostFree(a_h); hipHostFree(b_h); hipHostFree(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); hipDeviceReset(); return 0; }
6cc7c8090f9cac2eb3b14a1c78919bf44fdc808e.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> // 80 x 80 //const int N = 80 * 80; // 160 x 160 //const int N = 160 * 160; // 320 x 320 //const int N = 320 * 320; // 640 x 640 //const int N = 640 * 640; // 1k x 4 //const int N = 1000; // 10k x 4 //const int N = 1000 * 10; // 100k x 4 //const int N = 1000 * 100; // 1M x 4 //const int N = 1000 * 1000; // 10M x 4 //const int N = 1000 * 1000 * 10; const int N = 1 << 20; //const int N = 1 << 10; #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, const int offset, float *c_d) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); if(tid < N) { c_d[tid + offset] = a_d[tid + offset] + b_d[tid + offset]; } } int main( int argc, char **argv) { int devid = 0 ; int num_streams = 8; if(argc >= 2) num_streams = atoi(argv[1]); if(argc >= 3) devid = atoi(argv[2]); cudaSetDevice(devid); printf("\nrunning %d cuda streams on device %d\n", num_streams, devid); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, devid); printf("Device Number: %d\n", devid); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); // allocate streams cudaStream_t *streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t)); // init for (int i = 0; i < num_streams; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; //float *a_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *b_h = (float*) malloc ( N * num_streams * FLTSIZE); //float *c_h = (float*) malloc ( N * num_streams * FLTSIZE); float *a_h = NULL; checkCudaErrors(cudaMallocHost((void **)&a_h, N * num_streams * FLTSIZE)); float *b_h = NULL; checkCudaErrors(cudaMallocHost((void **)&b_h, N * num_streams * FLTSIZE)); float *c_h = NULL; checkCudaErrors(cudaMallocHost((void **)&c_h, N * num_streams * FLTSIZE)); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; cudaMalloc((void**)&a_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&b_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&c_d, N * num_streams * FLTSIZE); // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // create cuda event handles cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); cudaEventRecord(start,0); // copy data to deivce for (int i = 0; i < num_streams; i++) { int offset = i * N; cudaMemcpyAsync(&a_d[offset], &a_h[offset], databytes, cudaMemcpyHostToDevice, streams[i]); cudaMemcpyAsync(&b_d[offset], &b_h[offset], databytes, cudaMemcpyHostToDevice, streams[i]); } // launch one worker kernel per stream for (int i = 0; i < num_streams; i++) { int offset = i * N; kernel_vectorAdd <<< blocks, threads, 0, 
streams[i] >>> (a_d, b_d, N, offset, c_d); } // copy data back to host for (int i = 0; i < num_streams; i++) { int offset = i * N; cudaMemcpyAsync(&c_h[offset], &c_d[offset], databytes, cudaMemcpyDeviceToHost, streams[i]); } // required for async copy //cudaDeviceSynchronize(); //cudaEventSynchronize(stop); cudaEventRecord(stop, 0); // have CPU do some work while waiting for stage 1 to finish unsigned long int counter=0; while (cudaEventQuery(stop) == cudaErrorNotReady) { counter++; } float gpuTime_ms= 0; cudaEventElapsedTime(&gpuTime_ms, start, stop); printf("runtime (ms) : %f\n", gpuTime_ms); /* // check data bool success = 1; for(int i=0; i< N * num_streams; i++) { if (abs(c_h[i] - 3.3f) > 1e-6) { fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); success = 0; break; } } if(success) { printf("\nSuccess! Exit.\n"); } */ //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// for (int i = 0; i < num_streams; i++) { checkCudaErrors(cudaStreamDestroy(streams[i])); } checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); cudaFreeHost(a_h); cudaFreeHost(b_h); cudaFreeHost(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaDeviceReset(); return 0; }
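The per-stream chunking above assumes N divides evenly by num_streams; any remainder elements would be neither copied nor processed. Below is a small helper sketch that covers the tail; the Chunk struct and chunkFor name are hypothetical, not part of this sample.

#include <algorithm>
#include <cstddef>

// Per-stream chunking that covers the whole range even when `total` is not a
// multiple of `num_streams`: the first `total % num_streams` chunks get one
// extra element.
struct Chunk { size_t offset; size_t count; };

Chunk chunkFor(size_t total, int num_streams, int s) {
  size_t base = total / num_streams;
  size_t rem  = total % num_streams;
  size_t count  = base + (static_cast<size_t>(s) < rem ? 1 : 0);
  size_t offset = static_cast<size_t>(s) * base + std::min<size_t>(s, rem);
  return {offset, count};
}

// Usage sketch: issue one async copy + kernel per chunk, passing chunk.count
// as the element count instead of a fixed N:
//   Chunk c = chunkFor(N_total, num_streams, i);
//   cudaMemcpyAsync(a_d + c.offset, a_h + c.offset,
//                   c.count * sizeof(float), cudaMemcpyHostToDevice, streams[i]);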
a2ebf63f443e7bd3ef80c2b4c4701ae749ccbf53.hip
// !!! This is a file automatically generated by hipify!!! #include <helper_cuda.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include <hip/hip_runtime.h> #define THREADS_PER_DIM 16 #define BLOCKS_PER_DIM 16 #define THREADS_PER_BLOCK THREADS_PER_DIM *THREADS_PER_DIM #include "kmeans_hip_kernel.hip" //#define BLOCK_DELTA_REDUCE //#define BLOCK_CENTER_REDUCE #define CPU_DELTA_REDUCE #define CPU_CENTER_REDUCE extern "C" int setup(int argc, char **argv); /* function prototype */ // GLOBAL!!!!! unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */ unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */ unsigned int num_threads = num_threads_perdim * num_threads_perdim; /* number of threads */ unsigned int num_blocks = num_blocks_perdim * num_blocks_perdim; /* number of blocks */ /* _d denotes it resides on the device */ int *membership_new; /* newly assignment membership */ float *feature_d; /* inverted data array */ float *feature_flipped_d; /* original (not inverted) data array */ int *membership_d; /* membership on the device */ float *block_new_centers; /* sum of points in a cluster (per block) */ float *clusters_d; /* cluster centers on the device */ float *block_clusters_d; /* per block calculation of cluster centers */ int *block_deltas_d; /* per block calculation of deltas */ /* -------------- allocateMemory() ------------------- */ /* allocate device memory, calculate number of blocks and threads, and invert * the data array */ extern "C" void allocateMemory(int npoints, int nfeatures, int nclusters, float **features) { num_blocks = npoints / num_threads; if (npoints % num_threads > 0) /* defeat truncation */ num_blocks++; num_blocks_perdim = sqrt((double)num_blocks); while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once) num_blocks_perdim++; num_blocks = num_blocks_perdim * num_blocks_perdim; /* allocate memory for memory_new[] and initialize to -1 (host) */ membership_new = (int *)malloc(npoints * sizeof(int)); for (int i = 0; i < npoints; i++) { membership_new[i] = -1; } /* allocate memory for block_new_centers[] (host) */ block_new_centers = (float *)malloc(nclusters * nfeatures * sizeof(float)); /* allocate memory for feature_flipped_d[][], feature_d[][] (device) */ checkCudaErrors(hipMalloc((void **)&feature_flipped_d, npoints * nfeatures * sizeof(float))); checkCudaErrors(hipMemcpy(feature_flipped_d, features[0], npoints * nfeatures * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors( hipMalloc((void **)&feature_d, npoints * nfeatures * sizeof(float))); /* invert the data array (kernel execution) */ hipLaunchKernelGGL(( invert_mapping), dim3(num_blocks), dim3(num_threads), 0, 0, feature_flipped_d, feature_d, npoints, nfeatures); /* allocate memory for membership_d[] and clusters_d[][] (device) */ checkCudaErrors(hipMalloc((void **)&membership_d, npoints * sizeof(int))); checkCudaErrors(hipMalloc((void **)&clusters_d, nclusters * nfeatures * sizeof(float))); #ifdef BLOCK_DELTA_REDUCE // allocate array to hold the per block deltas on the gpu side checkCudaErrors( hipMalloc((void **)&block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int))); // hipMemcpy(block_delta_d, &delta_h, sizeof(int), hipMemcpyHostToDevice); #endif #ifdef BLOCK_CENTER_REDUCE // allocate memory and copy to card cluster array in which to accumulate // center points for the next iteration hipMalloc((void **)&block_clusters_d, 
num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); // hipMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), // hipMemcpyHostToDevice); #endif } /* -------------- allocateMemory() end ------------------- */ /* -------------- deallocateMemory() ------------------- */ /* free host and device memory */ extern "C" void deallocateMemory() { free(membership_new); free(block_new_centers); checkCudaErrors(hipFree(feature_d)); checkCudaErrors(hipFree(feature_flipped_d)); checkCudaErrors(hipFree(membership_d)); checkCudaErrors(hipFree(clusters_d)); #ifdef BLOCK_CENTER_REDUCE checkCudaErrors(hipFree(block_clusters_d)); #endif #ifdef BLOCK_DELTA_REDUCE checkCudaErrors(hipFree(block_deltas_d)); #endif } /* -------------- deallocateMemory() end ------------------- */ //////////////////////////////////////////////////////////////////////////////// // Program main // // int main(int argc, char **argv) { // as done in the CUDA start/help document provided setup(argc, argv); } // // //////////////////////////////////////////////////////////////////////////////// /* ------------------- kmeansCuda() ------------------------ */ extern "C" int // delta -- had problems when return value was of float type kmeansCuda(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, /* number of attributes for each point */ int npoints, /* number of data points */ int nclusters, /* number of clusters */ int *membership, /* which cluster the point belongs to */ float **clusters, /* coordinates of cluster centers */ int *new_centers_len, /* number of elements in each cluster */ float **new_centers /* sum of elements in each cluster */ ) { int delta = 0; /* if point has moved */ int i, j; /* counters */ /* copy membership (host to device) */ checkCudaErrors(hipMemcpy(membership_d, membership_new, npoints * sizeof(int), hipMemcpyHostToDevice)); /* copy clusters (host to device) */ checkCudaErrors(hipMemcpy(clusters_d, clusters[0], nclusters * nfeatures * sizeof(float), hipMemcpyHostToDevice)); /* set up texture */ hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<float>(); t_features.filterMode = hipFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; if (hipBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints * nfeatures * sizeof(float)) != hipSuccess) printf("Couldn't bind features array to texture!\n"); hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>(); t_features_flipped.filterMode = hipFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; if (hipBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints * nfeatures * sizeof(float)) != hipSuccess) printf("Couldn't bind features_flipped array to texture!\n"); hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>(); t_clusters.filterMode = hipFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; if (hipBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters * nfeatures * sizeof(float)) != hipSuccess) printf("Couldn't bind clusters array to texture!\n"); /* copy clusters to constant memory */ checkCudaErrors(hipMemcpyToSymbol(c_clusters, clusters[0], nclusters * nfeatures * sizeof(float), 0, hipMemcpyHostToDevice)); /* setup execution parameters. 
changed to 2d (source code on NVIDIA CUDA Programming Guide) */ dim3 grid(num_blocks_perdim, num_blocks_perdim); dim3 threads(num_threads_perdim * num_threads_perdim); /* execute the kernel */ hipLaunchKernelGGL(( kmeansPoint), dim3(grid), dim3(threads), 0, 0, feature_d, nfeatures, npoints, nclusters, membership_d, clusters_d, block_clusters_d, block_deltas_d); checkCudaErrors(hipDeviceSynchronize()); /* copy back membership (device to host) */ hipMemcpy(membership_new, membership_d, npoints * sizeof(int), hipMemcpyDeviceToHost); #ifdef BLOCK_CENTER_REDUCE /*** Copy back arrays of per block sums ***/ float *block_clusters_h = (float *)malloc(num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); checkCudaErrors(hipMemcpy(block_clusters_h, block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float), hipMemcpyDeviceToHost)); #endif #ifdef BLOCK_DELTA_REDUCE int *block_deltas_h = (int *)malloc(num_blocks_perdim * num_blocks_perdim * sizeof(int)); checkCudaErrors(hipMemcpy(block_deltas_h, block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int), hipMemcpyDeviceToHost)); #endif /* for each point, sum data points in each cluster and see if membership has changed: if so, increase delta and change old membership, and update new_centers; otherwise, update new_centers */ delta = 0; for (i = 0; i < npoints; i++) { int cluster_id = membership_new[i]; new_centers_len[cluster_id]++; if (membership_new[i] != membership[i]) { #ifdef CPU_DELTA_REDUCE delta++; #endif membership[i] = membership_new[i]; } #ifdef CPU_CENTER_REDUCE for (j = 0; j < nfeatures; j++) { new_centers[cluster_id][j] += feature[i][j]; } #endif } #ifdef BLOCK_DELTA_REDUCE /*** calculate global sums from per block sums for delta and the new centers * ***/ // debug // printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim // * num_blocks_perdim); for (i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { // printf("block %d delta is %d \n",i,block_deltas_h[i]); delta += block_deltas_h[i]; } #endif #ifdef BLOCK_CENTER_REDUCE for (int j = 0; j < nclusters; j++) { for (int k = 0; k < nfeatures; k++) { block_new_centers[j * nfeatures + k] = 0.f; } } for (i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { for (int j = 0; j < nclusters; j++) { for (int k = 0; k < nfeatures; k++) { block_new_centers[j * nfeatures + k] += block_clusters_h[i * nclusters * nfeatures + j * nfeatures + k]; } } } #ifdef CPU_CENTER_REDUCE // debug /*for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) { printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]); } } }*/ #endif #ifdef BLOCK_CENTER_REDUCE for (int j = 0; j < nclusters; j++) { for (int k = 0; k < nfeatures; k++) new_centers[j][k] = block_new_centers[j * nfeatures + k]; } #endif #endif return delta; } /* ------------------- kmeansCuda() end ------------------------ */
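This port still binds the feature and cluster arrays through legacy texture references (hipBindTexture here, cudaBindTexture in the .cu counterpart), an API that is deprecated in current CUDA. The sketch below shows the texture-object replacement for linear float data; it is not this file's code, the makeFloatTexture and sumFeatures names are made up for illustration, and HIP mirrors the same calls with hipTextureObject_t / hipCreateTextureObject.

#include <cuda_runtime.h>

// Create a 1D texture object over a linear device buffer of floats, the
// modern replacement for texture<float,1> references bound with
// cudaBindTexture. Error checks omitted.
cudaTextureObject_t makeFloatTexture(const float* d_data, size_t n_elems) {
  cudaResourceDesc resDesc = {};
  resDesc.resType                = cudaResourceTypeLinear;
  resDesc.res.linear.devPtr      = const_cast<float*>(d_data);
  resDesc.res.linear.desc        = cudaCreateChannelDesc<float>();
  resDesc.res.linear.sizeInBytes = n_elems * sizeof(float);

  cudaTextureDesc texDesc = {};
  texDesc.readMode = cudaReadModeElementType;

  cudaTextureObject_t tex = 0;
  cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
  return tex;
}

// In a kernel the object is passed as an ordinary argument and read with
// tex1Dfetch<float>(tex, i) instead of tex1Dfetch(t_features, i).
__global__ void sumFeatures(cudaTextureObject_t tex, int n, float* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) atomicAdd(out, tex1Dfetch<float>(tex, i));
}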
a2ebf63f443e7bd3ef80c2b4c4701ae749ccbf53.cu
#include <helper_cuda.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include <cuda.h> #define THREADS_PER_DIM 16 #define BLOCKS_PER_DIM 16 #define THREADS_PER_BLOCK THREADS_PER_DIM *THREADS_PER_DIM #include "kmeans_cuda_kernel.cu" //#define BLOCK_DELTA_REDUCE //#define BLOCK_CENTER_REDUCE #define CPU_DELTA_REDUCE #define CPU_CENTER_REDUCE extern "C" int setup(int argc, char **argv); /* function prototype */ // GLOBAL!!!!! unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */ unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */ unsigned int num_threads = num_threads_perdim * num_threads_perdim; /* number of threads */ unsigned int num_blocks = num_blocks_perdim * num_blocks_perdim; /* number of blocks */ /* _d denotes it resides on the device */ int *membership_new; /* newly assignment membership */ float *feature_d; /* inverted data array */ float *feature_flipped_d; /* original (not inverted) data array */ int *membership_d; /* membership on the device */ float *block_new_centers; /* sum of points in a cluster (per block) */ float *clusters_d; /* cluster centers on the device */ float *block_clusters_d; /* per block calculation of cluster centers */ int *block_deltas_d; /* per block calculation of deltas */ /* -------------- allocateMemory() ------------------- */ /* allocate device memory, calculate number of blocks and threads, and invert * the data array */ extern "C" void allocateMemory(int npoints, int nfeatures, int nclusters, float **features) { num_blocks = npoints / num_threads; if (npoints % num_threads > 0) /* defeat truncation */ num_blocks++; num_blocks_perdim = sqrt((double)num_blocks); while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once) num_blocks_perdim++; num_blocks = num_blocks_perdim * num_blocks_perdim; /* allocate memory for memory_new[] and initialize to -1 (host) */ membership_new = (int *)malloc(npoints * sizeof(int)); for (int i = 0; i < npoints; i++) { membership_new[i] = -1; } /* allocate memory for block_new_centers[] (host) */ block_new_centers = (float *)malloc(nclusters * nfeatures * sizeof(float)); /* allocate memory for feature_flipped_d[][], feature_d[][] (device) */ checkCudaErrors(cudaMalloc((void **)&feature_flipped_d, npoints * nfeatures * sizeof(float))); checkCudaErrors(cudaMemcpy(feature_flipped_d, features[0], npoints * nfeatures * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMalloc((void **)&feature_d, npoints * nfeatures * sizeof(float))); /* invert the data array (kernel execution) */ invert_mapping<<<num_blocks, num_threads>>>(feature_flipped_d, feature_d, npoints, nfeatures); /* allocate memory for membership_d[] and clusters_d[][] (device) */ checkCudaErrors(cudaMalloc((void **)&membership_d, npoints * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&clusters_d, nclusters * nfeatures * sizeof(float))); #ifdef BLOCK_DELTA_REDUCE // allocate array to hold the per block deltas on the gpu side checkCudaErrors( cudaMalloc((void **)&block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int))); // cudaMemcpy(block_delta_d, &delta_h, sizeof(int), cudaMemcpyHostToDevice); #endif #ifdef BLOCK_CENTER_REDUCE // allocate memory and copy to card cluster array in which to accumulate // center points for the next iteration cudaMalloc((void **)&block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); // 
cudaMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), // cudaMemcpyHostToDevice); #endif } /* -------------- allocateMemory() end ------------------- */ /* -------------- deallocateMemory() ------------------- */ /* free host and device memory */ extern "C" void deallocateMemory() { free(membership_new); free(block_new_centers); checkCudaErrors(cudaFree(feature_d)); checkCudaErrors(cudaFree(feature_flipped_d)); checkCudaErrors(cudaFree(membership_d)); checkCudaErrors(cudaFree(clusters_d)); #ifdef BLOCK_CENTER_REDUCE checkCudaErrors(cudaFree(block_clusters_d)); #endif #ifdef BLOCK_DELTA_REDUCE checkCudaErrors(cudaFree(block_deltas_d)); #endif } /* -------------- deallocateMemory() end ------------------- */ //////////////////////////////////////////////////////////////////////////////// // Program main // // int main(int argc, char **argv) { // as done in the CUDA start/help document provided setup(argc, argv); } // // //////////////////////////////////////////////////////////////////////////////// /* ------------------- kmeansCuda() ------------------------ */ extern "C" int // delta -- had problems when return value was of float type kmeansCuda(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, /* number of attributes for each point */ int npoints, /* number of data points */ int nclusters, /* number of clusters */ int *membership, /* which cluster the point belongs to */ float **clusters, /* coordinates of cluster centers */ int *new_centers_len, /* number of elements in each cluster */ float **new_centers /* sum of elements in each cluster */ ) { int delta = 0; /* if point has moved */ int i, j; /* counters */ /* copy membership (host to device) */ checkCudaErrors(cudaMemcpy(membership_d, membership_new, npoints * sizeof(int), cudaMemcpyHostToDevice)); /* copy clusters (host to device) */ checkCudaErrors(cudaMemcpy(clusters_d, clusters[0], nclusters * nfeatures * sizeof(float), cudaMemcpyHostToDevice)); /* set up texture */ cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<float>(); t_features.filterMode = cudaFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; if (cudaBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints * nfeatures * sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind features array to texture!\n"); cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>(); t_features_flipped.filterMode = cudaFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; if (cudaBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints * nfeatures * sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind features_flipped array to texture!\n"); cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>(); t_clusters.filterMode = cudaFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; if (cudaBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters * nfeatures * sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind clusters array to texture!\n"); /* copy clusters to constant memory */ checkCudaErrors(cudaMemcpyToSymbol(c_clusters, clusters[0], nclusters * nfeatures * sizeof(float), 0, cudaMemcpyHostToDevice)); /* setup execution parameters. 
changed to 2d (source code on NVIDIA CUDA Programming Guide) */
  dim3 grid(num_blocks_perdim, num_blocks_perdim);
  dim3 threads(num_threads_perdim * num_threads_perdim);

  /* execute the kernel */
  kmeansPoint<<<grid, threads>>>(feature_d, nfeatures, npoints, nclusters,
                                 membership_d, clusters_d, block_clusters_d,
                                 block_deltas_d);

  checkCudaErrors(cudaThreadSynchronize());

  /* copy back membership (device to host) */
  cudaMemcpy(membership_new, membership_d, npoints * sizeof(int),
             cudaMemcpyDeviceToHost);

#ifdef BLOCK_CENTER_REDUCE
  /*** Copy back arrays of per block sums ***/
  float *block_clusters_h = (float *)malloc(
      num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float));

  checkCudaErrors(cudaMemcpy(block_clusters_h, block_clusters_d,
                             num_blocks_perdim * num_blocks_perdim * nclusters *
                                 nfeatures * sizeof(float),
                             cudaMemcpyDeviceToHost));
#endif
#ifdef BLOCK_DELTA_REDUCE
  int *block_deltas_h =
      (int *)malloc(num_blocks_perdim * num_blocks_perdim * sizeof(int));

  checkCudaErrors(cudaMemcpy(block_deltas_h, block_deltas_d,
                             num_blocks_perdim * num_blocks_perdim * sizeof(int),
                             cudaMemcpyDeviceToHost));
#endif

  /* for each point, sum data points in each cluster and see if membership has
     changed: if so, increase delta and change old membership, and update
     new_centers; otherwise, update new_centers */
  delta = 0;
  for (i = 0; i < npoints; i++) {
    int cluster_id = membership_new[i];
    new_centers_len[cluster_id]++;
    if (membership_new[i] != membership[i]) {
#ifdef CPU_DELTA_REDUCE
      delta++;
#endif
      membership[i] = membership_new[i];
    }
#ifdef CPU_CENTER_REDUCE
    for (j = 0; j < nfeatures; j++) {
      new_centers[cluster_id][j] += feature[i][j];
    }
#endif
  }

#ifdef BLOCK_DELTA_REDUCE
  /*** calculate global sums from per block sums for delta and the new centers
   * ***/

  // debug
  // printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim
  // * num_blocks_perdim);
  for (i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
    // printf("block %d delta is %d \n",i,block_deltas_h[i]);
    delta += block_deltas_h[i];
  }
#endif
#ifdef BLOCK_CENTER_REDUCE
  for (int j = 0; j < nclusters; j++) {
    for (int k = 0; k < nfeatures; k++) {
      block_new_centers[j * nfeatures + k] = 0.f;
    }
  }

  for (i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) {
    for (int j = 0; j < nclusters; j++) {
      for (int k = 0; k < nfeatures; k++) {
        block_new_centers[j * nfeatures + k] +=
            block_clusters_h[i * nclusters * nfeatures + j * nfeatures + k];
      }
    }
  }

#ifdef CPU_CENTER_REDUCE
  // debug
  /*for(int j = 0; j < nclusters;j++) {
      for(int k = 0; k < nfeatures;k++) {
          if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] ||
             new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) {
              printf("\t \t for %d:%d, normal value is %e and gpu reduced value
  id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]);
          }
      }
  }*/
#endif

#ifdef BLOCK_CENTER_REDUCE
  for (int j = 0; j < nclusters; j++) {
    for (int k = 0; k < nfeatures; k++)
      new_centers[j][k] = block_new_centers[j * nfeatures + k];
  }
#endif
#endif

  return delta;
}
/* ------------------- kmeansCuda() end ------------------------ */
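/* Standalone restatement of the grid-sizing arithmetic in allocateMemory():
   the point count is rounded up to whole thread blocks, and the block count
   is then rounded up to a perfect square so it can be launched as a
   (num_blocks_perdim x num_blocks_perdim) grid. The 100000-point value below
   is only an example. */
static void check_grid_sizing(int npoints) {
  unsigned int threads = THREADS_PER_DIM * THREADS_PER_DIM; /* 256 */
  unsigned int blocks = npoints / threads;
  if (npoints % threads > 0) blocks++;       /* e.g. 100000 -> 391 */
  unsigned int perdim = (unsigned int)sqrt((double)blocks);
  while (perdim * perdim < blocks) perdim++; /* 19 -> 20 */
  blocks = perdim * perdim;                  /* 400 */
  assert(blocks * threads >= (unsigned int)npoints);
  printf("%d points -> %u x %u blocks of %u threads\n",
         npoints, perdim, perdim, threads);
}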
d7229bced52d538fb5c0986adf924e91191762d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // This file contains C wrappers around the some of the CUDA API and the // kernel functions so that they can be called from "particleSystem.cpp" #include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h #include <shrQATest.h> #include <cstdlib> #include <cstdio> #include <string.h> #if defined(__APPLE__) || defined(MACOSX) #include <GLUT/glut.h> #else #include <GL/freeglut.h> #endif #include <cuda_gl_interop.h> #include "thrust/device_ptr.h" #include "thrust/for_each.h" #include "thrust/iterator/zip_iterator.h" #include "thrust/sort.h" #include "particles_kernel.cu" extern "C" { void cudaInit(int argc, char **argv) { int devID; // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { devID = cutilDeviceInit(argc, argv); if (devID < 0) { printf("No CUDA Capable devices found, exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_WAIVED); } } else { devID = cutGetMaxGflopsDeviceId(); hipSetDevice( devID ); } } void cudaGLInit(int argc, char **argv) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { cutilDeviceInit(argc, argv); } else { hipGLSetGLDevice( cutGetMaxGflopsDeviceId() ); } } void allocateArray(void **devPtr, size_t size) { cutilSafeCall(hipMalloc(devPtr, size)); } void freeArray(void *devPtr) { cutilSafeCall(hipFree(devPtr)); } void threadSync() { cutilSafeCall(cutilDeviceSynchronize()); } void copyArrayToDevice(void* device, const void* host, int offset, int size) { cutilSafeCall(hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice)); } void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource) { cutilSafeCall(hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo, hipGraphicsMapFlagsNone)); } void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource) { cutilSafeCall(hipGraphicsUnregisterResource(cuda_vbo_resource)); } void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource) { void *ptr; cutilSafeCall(hipGraphicsMapResources(1, cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes, *cuda_vbo_resource)); return ptr; } void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource) { cutilSafeCall(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); } void copyArrayFromDevice(void* host, const void* device, struct cudaGraphicsResource **cuda_vbo_resource, int size) { if (cuda_vbo_resource) device = mapGLBufferObject(cuda_vbo_resource); cutilSafeCall(hipMemcpy(host, device, size, hipMemcpyDeviceToHost)); if (cuda_vbo_resource) unmapGLBufferObject(*cuda_vbo_resource); } void setParameters(SimParams *hostParams) { // copy parameters to constant memory cutilSafeCall( hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)) ); } //Round a / b to nearest higher integer value uint iDivUp(uint a, uint b){ return (a % b != 0) ? 
(a / b + 1) : (a / b); } // compute grid and thread block size for a given number of elements void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } void integrateSystem(float *pos, float *posTmp, float *vel, float deltaTime, uint numParticles) { thrust::device_ptr<float4> d_pos4((float4 *)pos); thrust::device_ptr<float4> d_posTmp4((float4 *)posTmp); thrust::device_ptr<float4> d_vel4((float4 *)vel); thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_posTmp4, d_vel4)), thrust::make_zip_iterator(thrust::make_tuple(d_pos4+numParticles, d_posTmp4+numParticles, d_vel4+numParticles)), integrate_functor(deltaTime)); } void calcHash(uint* gridParticleHash, uint* gridParticleIndex, float* posTmp, int numParticles) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); // execute the kernel hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads) , 0, 0, gridParticleHash, gridParticleIndex, (float4 *) posTmp, numParticles); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void reorderDataAndFindCellStart(uint* cellStart, uint* cellEnd, float* sortedPosTmp, uint* gridParticleHash, uint* gridParticleIndex, float* oldPosTmp, uint numParticles, uint numCells) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); // set all cells to empty cutilSafeCall(hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint))); uint smemSize = sizeof(uint)*(numThreads+1); hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0, cellStart, cellEnd, (float4 *) sortedPosTmp, gridParticleHash, gridParticleIndex, (float4 *) oldPosTmp, numParticles); cutilCheckMsg("Kernel execution failed: reorderDataAndFindCellStartD"); } void calLambda(float* sortedPosTmp, float* lambda, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells) { // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel hipLaunchKernelGGL(( calLambdaD), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4*)sortedPosTmp, (float*)lambda, gridParticleIndex, cellStart, cellEnd, numParticles); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void calDeltaP_Collision_UpdatePos(float* sortedPosTmp, float* sortedPosTmp2, float* lambda, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells) { // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel hipLaunchKernelGGL(( calDeltaP_Collision_UpdatePosD), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4*)sortedPosTmp, (float4*)sortedPosTmp2, (float*)lambda, gridParticleIndex, cellStart, cellEnd, numParticles); cutilSafeCall(hipMemcpy(sortedPosTmp, sortedPosTmp2, sizeof(float4) * numParticles, hipMemcpyDeviceToDevice)); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void updateVelPos(float deltaTime, float* newPos, float* newVel, float* sortedPosTmp, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells) { // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel hipLaunchKernelGGL(( updateVelPosD), dim3(numBlocks), dim3(numThreads) , 0, 0, 
deltaTime, (float4*)newPos, (float4*)newVel, (float4*)sortedPosTmp, gridParticleIndex, cellStart, cellEnd, numParticles); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles) { thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash), thrust::device_ptr<uint>(dGridParticleHash + numParticles), thrust::device_ptr<uint>(dGridParticleIndex)); } } // extern "C"
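// Hypothetical per-step update built from the wrappers above. The actual call
// order lives in particleSystem.cpp, which is not part of this file, so the
// buffer names and the solver-iteration count below are placeholders; `uint`
// is the typedef already used by this file's signatures.
void stepSimulation(float dt,
                    float *dPos, float *dPosTmp, float *dVel,
                    float *dSortedPosTmp, float *dSortedPosTmp2, float *dLambda,
                    uint *dHash, uint *dIndex, uint *dCellStart, uint *dCellEnd,
                    uint numParticles, uint numCells, int solverIterations)
{
    integrateSystem(dPos, dPosTmp, dVel, dt, numParticles);      // predict positions
    calcHash(dHash, dIndex, dPosTmp, numParticles);              // grid hash per particle
    sortParticles(dHash, dIndex, numParticles);                  // sort by cell hash
    reorderDataAndFindCellStart(dCellStart, dCellEnd, dSortedPosTmp,
                                dHash, dIndex, dPosTmp,
                                numParticles, numCells);         // cell ranges + reorder
    for (int it = 0; it < solverIterations; ++it) {              // constraint solve
        calLambda(dSortedPosTmp, dLambda, dIndex,
                  dCellStart, dCellEnd, numParticles, numCells);
        calDeltaP_Collision_UpdatePos(dSortedPosTmp, dSortedPosTmp2, dLambda,
                                      dIndex, dCellStart, dCellEnd,
                                      numParticles, numCells);
    }
    updateVelPos(dt, dPos, dVel, dSortedPosTmp, dIndex,
                 dCellStart, dCellEnd, numParticles, numCells);  // write back pos/vel
}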
d7229bced52d538fb5c0986adf924e91191762d6.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // This file contains C wrappers around the some of the CUDA API and the // kernel functions so that they can be called from "particleSystem.cpp" #include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h #include <shrQATest.h> #include <cstdlib> #include <cstdio> #include <string.h> #if defined(__APPLE__) || defined(MACOSX) #include <GLUT/glut.h> #else #include <GL/freeglut.h> #endif #include <cuda_gl_interop.h> #include "thrust/device_ptr.h" #include "thrust/for_each.h" #include "thrust/iterator/zip_iterator.h" #include "thrust/sort.h" #include "particles_kernel.cu" extern "C" { void cudaInit(int argc, char **argv) { int devID; // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { devID = cutilDeviceInit(argc, argv); if (devID < 0) { printf("No CUDA Capable devices found, exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_WAIVED); } } else { devID = cutGetMaxGflopsDeviceId(); cudaSetDevice( devID ); } } void cudaGLInit(int argc, char **argv) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { cutilDeviceInit(argc, argv); } else { cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ); } } void allocateArray(void **devPtr, size_t size) { cutilSafeCall(cudaMalloc(devPtr, size)); } void freeArray(void *devPtr) { cutilSafeCall(cudaFree(devPtr)); } void threadSync() { cutilSafeCall(cutilDeviceSynchronize()); } void copyArrayToDevice(void* device, const void* host, int offset, int size) { cutilSafeCall(cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice)); } void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource) { cutilSafeCall(cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo, cudaGraphicsMapFlagsNone)); } void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource) { cutilSafeCall(cudaGraphicsUnregisterResource(cuda_vbo_resource)); } void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource) { void *ptr; cutilSafeCall(cudaGraphicsMapResources(1, cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes, *cuda_vbo_resource)); return ptr; } void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource) { cutilSafeCall(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); } void copyArrayFromDevice(void* host, const void* device, struct cudaGraphicsResource **cuda_vbo_resource, int size) { if (cuda_vbo_resource) device = mapGLBufferObject(cuda_vbo_resource); cutilSafeCall(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost)); if (cuda_vbo_resource) unmapGLBufferObject(*cuda_vbo_resource); } void setParameters(SimParams *hostParams) { // copy parameters to constant memory cutilSafeCall( cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams)) ); } //Round a / b to nearest higher integer value uint iDivUp(uint a, uint b){ return (a % b != 0) ? 
(a / b + 1) : (a / b); } // compute grid and thread block size for a given number of elements void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads) { numThreads = min(blockSize, n); numBlocks = iDivUp(n, numThreads); } void integrateSystem(float *pos, float *posTmp, float *vel, float deltaTime, uint numParticles) { thrust::device_ptr<float4> d_pos4((float4 *)pos); thrust::device_ptr<float4> d_posTmp4((float4 *)posTmp); thrust::device_ptr<float4> d_vel4((float4 *)vel); thrust::for_each( thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_posTmp4, d_vel4)), thrust::make_zip_iterator(thrust::make_tuple(d_pos4+numParticles, d_posTmp4+numParticles, d_vel4+numParticles)), integrate_functor(deltaTime)); } void calcHash(uint* gridParticleHash, uint* gridParticleIndex, float* posTmp, int numParticles) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); // execute the kernel calcHashD<<< numBlocks, numThreads >>>(gridParticleHash, gridParticleIndex, (float4 *) posTmp, numParticles); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void reorderDataAndFindCellStart(uint* cellStart, uint* cellEnd, float* sortedPosTmp, uint* gridParticleHash, uint* gridParticleIndex, float* oldPosTmp, uint numParticles, uint numCells) { uint numThreads, numBlocks; computeGridSize(numParticles, 256, numBlocks, numThreads); // set all cells to empty cutilSafeCall(cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint))); uint smemSize = sizeof(uint)*(numThreads+1); reorderDataAndFindCellStartD<<< numBlocks, numThreads, smemSize>>>( cellStart, cellEnd, (float4 *) sortedPosTmp, gridParticleHash, gridParticleIndex, (float4 *) oldPosTmp, numParticles); cutilCheckMsg("Kernel execution failed: reorderDataAndFindCellStartD"); } void calLambda(float* sortedPosTmp, float* lambda, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells) { // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel calLambdaD<<< numBlocks, numThreads >>>((float4*)sortedPosTmp, (float*)lambda, gridParticleIndex, cellStart, cellEnd, numParticles); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void calDeltaP_Collision_UpdatePos(float* sortedPosTmp, float* sortedPosTmp2, float* lambda, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells) { // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel calDeltaP_Collision_UpdatePosD<<< numBlocks, numThreads >>>((float4*)sortedPosTmp, (float4*)sortedPosTmp2, (float*)lambda, gridParticleIndex, cellStart, cellEnd, numParticles); cutilSafeCall(cudaMemcpy(sortedPosTmp, sortedPosTmp2, sizeof(float4) * numParticles, cudaMemcpyDeviceToDevice)); // check if kernel invocation generated an error cutilCheckMsg("Kernel execution failed"); } void updateVelPos(float deltaTime, float* newPos, float* newVel, float* sortedPosTmp, uint* gridParticleIndex, uint* cellStart, uint* cellEnd, uint numParticles, uint numCells) { // thread per particle uint numThreads, numBlocks; computeGridSize(numParticles, 64, numBlocks, numThreads); // execute the kernel updateVelPosD<<< numBlocks, numThreads >>>(deltaTime, (float4*)newPos, (float4*)newVel, (float4*)sortedPosTmp, gridParticleIndex, cellStart, cellEnd, numParticles); // check if kernel invocation generated an error 
cutilCheckMsg("Kernel execution failed"); } void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles) { thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash), thrust::device_ptr<uint>(dGridParticleHash + numParticles), thrust::device_ptr<uint>(dGridParticleIndex)); } } // extern "C"
bcc79d21819b5579324c54f14ea85d80267d7d19.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/fill.h> #include <thrust/device_allocator.h> #include <thrust/iterator/counting_iterator.h> #include "labels.h" #include <fstream> using namespace std; __device__ double myatomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } namespace kmeans { namespace detail { __device__ __forceinline__ void update_centroid(int label, int dimension,int d,double accumulator, double* centroids, int count, int* counts) { int index = label * d + dimension; double* target = centroids + index; myatomicAdd(target, accumulator); if (dimension == 0) { myatomicAdd((double*)counts + label, count); } } __global__ void calculate_count(int* ordered_labels,int* counts){ int dataindex = threadIdx.x + blockIdx.x * blockDim.x; int thislabel = ordered_labels[dataindex]; atomicAdd(counts + thislabel,1); } __global__ void calculate_centroids(int n, int d, int k, double* data, int* ordered_labels, int* ordered_indices, double* centroids){ int global_id_x = threadIdx.x; int global_id_y = threadIdx.y + blockIdx.y * blockDim.y; if((global_id_x < d) && (global_id_y < n)){ int label = ordered_labels[global_id_y]; int indice = ordered_indices[global_id_y]; double ademisiondata = data[indice*d + global_id_x]; double *target = centroids + label * d+global_id_x; myatomicAdd(target,ademisiondata); } } __global__ void scale_centroids(int d, int k, int* counts, double* centroids) { int global_id_x = threadIdx.x ; int global_id_y = threadIdx.y + blockIdx.y * blockDim.y; if ((global_id_x < d) && (global_id_y < k)) { int count = counts[global_id_y]; //To avoid introducing divide by zero errors //If a centroid has no weight, we'll do no normalization //This will keep its coordinates defined. if (count < 1) { count = 1; } double scale = 1.0/double(count); centroids[global_id_x + d * global_id_y] *= scale; } } void Read_Center(int k,int d,thrust::device_vector<double>& centroids){ thrust::host_vector<double> host_centroids(k*d); ifstream infile; std::string filename = "/data/006zzy/files/tempcenter.txt"; infile.open(filename); for(int i = 0;i < k;i++){ for(int j = 0;j < d;j++){ infile >> host_centroids[i * d + j]; } } centroids = host_centroids; infile.close(); std::cout << "slave read tempcenter.txt is ok..." 
<< std::endl; /* for(int i = 0;i < k;i++) for(int j = 0;j < d;j++){ std::cout << centroids[i*d + j] << " "; } std::cout <<std::endl; */ } void Save_Center(int k,int d,thrust::host_vector<double>& centroids,int index){ /* std::cout <<"h_centroids :"<<std::endl; for(int i = 0;i < k;i++){ for(int j = 0;j < d;j++){ std::cout << centroids[i*d + j] << " "; } } std::cout<<std::endl;*/ std::string filename = "/data/006zzy/files/tempdata_"; std::string number = std::to_string(index); filename += number; filename += ".txt"; ofstream outfile; outfile.open(filename); for(int i = 0;i < k;i++){ for(int j = 0;j < d;j++){ outfile << centroids[i * d + j ]; outfile << " "; } } outfile.close(); } void find_centroids(int n, int d, int k, thrust::device_vector<double>& data, thrust::device_vector<int>& labels, thrust::device_vector<double>& centroids, thrust::device_vector<int>& range, thrust::device_vector<int>& indices, thrust::device_vector<int>& counts) { int dev_num; hipGetDevice(&dev_num); detail::mymemcpy(indices,range); //Bring all labels with the same value together #if 1 thrust::sort_by_key(labels.begin(), labels.end(), indices.begin()); #else mycub::sort_by_key_int(labels, indices);//wrong! #endif //Initialize centroids to all zeros detail::mymemzero(centroids); //Initialize counts to all zeros detail::mymemzero(counts); //Calculate centroids int n_threads_x = n;//old:64 int n_threads_y = 1;//old:16 //XXX Number of blocks here is hard coded at 30 //This should be taken care of more thoughtfully. //dim3(1,1),old:Dim3(1,30) hipLaunchKernelGGL(( detail::calculate_count), dim3(dim3(1, 1)), dim3(dim3(n_threads_x, n_threads_y)), 0, cuda_stream[dev_num], thrust::raw_pointer_cast(labels.data()), thrust::raw_pointer_cast(counts.data())); n_threads_x = 512;//old:64 n_threads_y = 2;//old:16 //contain 2*128 = 256 points hipLaunchKernelGGL(( detail::calculate_centroids), dim3(dim3(1, 128)), dim3(dim3(n_threads_x, n_threads_y)), 0, cuda_stream[dev_num], n, d, k, thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(labels.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(centroids.data())); //Scale centroids n_threads_x = 512; n_threads_y = 2; //y:k x:d hipLaunchKernelGGL(( detail::scale_centroids), dim3(dim3(1,128)), dim3(dim3(n_threads_x, n_threads_y)), 0, cuda_stream[dev_num], d, k, thrust::raw_pointer_cast(counts.data()), thrust::raw_pointer_cast(centroids.data())); /* //print counts for(int i = 0;i <k;i++){ std::cout << counts[i] <<" "; } std::cout << std::endl; //print &new center std::cout <<"centroids in find centroids end:"<<std::endl; for(int i = 0;i < n;i++){ for(int j = 0;j < d;j++){ std::cout << centroids[i*d + j] << " "; } } std::cout << std::endl;*/ } } }
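// Minimal sanity-check sketch for the CAS-based double-precision myatomicAdd
// defined at the top of this file: every thread adds 1.0 to one accumulator,
// so the result must equal the launched thread count. Assumes <cstdio> is
// available for printf; the launch shape is arbitrary.
__global__ void myatomicAdd_test(double *acc) { myatomicAdd(acc, 1.0); }

void check_myatomicAdd() {
  double h_acc = 0.0, *d_acc;
  hipMalloc((void **)&d_acc, sizeof(double));
  hipMemcpy(d_acc, &h_acc, sizeof(double), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(myatomicAdd_test, dim3(32), dim3(256), 0, 0, d_acc);
  hipMemcpy(&h_acc, d_acc, sizeof(double), hipMemcpyDeviceToHost);
  hipFree(d_acc);
  printf("sum = %.1f (expected %d)\n", h_acc, 32 * 256); // 8192.0 if the adds are atomic
}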
bcc79d21819b5579324c54f14ea85d80267d7d19.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cuda.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/fill.h> #include <thrust/device_allocator.h> #include <thrust/iterator/counting_iterator.h> #include "labels.h" #include <fstream> using namespace std; __device__ double myatomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } namespace kmeans { namespace detail { __device__ __forceinline__ void update_centroid(int label, int dimension,int d,double accumulator, double* centroids, int count, int* counts) { int index = label * d + dimension; double* target = centroids + index; myatomicAdd(target, accumulator); if (dimension == 0) { myatomicAdd((double*)counts + label, count); } } __global__ void calculate_count(int* ordered_labels,int* counts){ int dataindex = threadIdx.x + blockIdx.x * blockDim.x; int thislabel = ordered_labels[dataindex]; atomicAdd(counts + thislabel,1); } __global__ void calculate_centroids(int n, int d, int k, double* data, int* ordered_labels, int* ordered_indices, double* centroids){ int global_id_x = threadIdx.x; int global_id_y = threadIdx.y + blockIdx.y * blockDim.y; if((global_id_x < d) && (global_id_y < n)){ int label = ordered_labels[global_id_y]; int indice = ordered_indices[global_id_y]; double ademisiondata = data[indice*d + global_id_x]; double *target = centroids + label * d+global_id_x; myatomicAdd(target,ademisiondata); } } __global__ void scale_centroids(int d, int k, int* counts, double* centroids) { int global_id_x = threadIdx.x ; int global_id_y = threadIdx.y + blockIdx.y * blockDim.y; if ((global_id_x < d) && (global_id_y < k)) { int count = counts[global_id_y]; //To avoid introducing divide by zero errors //If a centroid has no weight, we'll do no normalization //This will keep its coordinates defined. if (count < 1) { count = 1; } double scale = 1.0/double(count); centroids[global_id_x + d * global_id_y] *= scale; } } void Read_Center(int k,int d,thrust::device_vector<double>& centroids){ thrust::host_vector<double> host_centroids(k*d); ifstream infile; std::string filename = "/data/006zzy/files/tempcenter.txt"; infile.open(filename); for(int i = 0;i < k;i++){ for(int j = 0;j < d;j++){ infile >> host_centroids[i * d + j]; } } centroids = host_centroids; infile.close(); std::cout << "slave read tempcenter.txt is ok..." 
<< std::endl; /* for(int i = 0;i < k;i++) for(int j = 0;j < d;j++){ std::cout << centroids[i*d + j] << " "; } std::cout <<std::endl; */ } void Save_Center(int k,int d,thrust::host_vector<double>& centroids,int index){ /* std::cout <<"h_centroids :"<<std::endl; for(int i = 0;i < k;i++){ for(int j = 0;j < d;j++){ std::cout << centroids[i*d + j] << " "; } } std::cout<<std::endl;*/ std::string filename = "/data/006zzy/files/tempdata_"; std::string number = std::to_string(index); filename += number; filename += ".txt"; ofstream outfile; outfile.open(filename); for(int i = 0;i < k;i++){ for(int j = 0;j < d;j++){ outfile << centroids[i * d + j ]; outfile << " "; } } outfile.close(); } void find_centroids(int n, int d, int k, thrust::device_vector<double>& data, thrust::device_vector<int>& labels, thrust::device_vector<double>& centroids, thrust::device_vector<int>& range, thrust::device_vector<int>& indices, thrust::device_vector<int>& counts) { int dev_num; cudaGetDevice(&dev_num); detail::mymemcpy(indices,range); //Bring all labels with the same value together #if 1 thrust::sort_by_key(labels.begin(), labels.end(), indices.begin()); #else mycub::sort_by_key_int(labels, indices);//wrong! #endif //Initialize centroids to all zeros detail::mymemzero(centroids); //Initialize counts to all zeros detail::mymemzero(counts); //Calculate centroids int n_threads_x = n;//old:64 int n_threads_y = 1;//old:16 //XXX Number of blocks here is hard coded at 30 //This should be taken care of more thoughtfully. //dim3(1,1),old:Dim3(1,30) detail::calculate_count<<<dim3(1, 1), dim3(n_threads_x, n_threads_y), 0, cuda_stream[dev_num]>>> (thrust::raw_pointer_cast(labels.data()), thrust::raw_pointer_cast(counts.data())); n_threads_x = 512;//old:64 n_threads_y = 2;//old:16 //contain 2*128 = 256 points detail::calculate_centroids<<<dim3(1, 128), dim3(n_threads_x, n_threads_y), 0, cuda_stream[dev_num]>>> (n, d, k, thrust::raw_pointer_cast(data.data()), thrust::raw_pointer_cast(labels.data()), thrust::raw_pointer_cast(indices.data()), thrust::raw_pointer_cast(centroids.data())); //Scale centroids n_threads_x = 512; n_threads_y = 2; //y:k x:d detail::scale_centroids<<<dim3(1,128), dim3(n_threads_x, n_threads_y), 0, cuda_stream[dev_num]>>> (d, k, thrust::raw_pointer_cast(counts.data()), thrust::raw_pointer_cast(centroids.data())); /* //print counts for(int i = 0;i <k;i++){ std::cout << counts[i] <<" "; } std::cout << std::endl; //print &new center std::cout <<"centroids in find centroids end:"<<std::endl; for(int i = 0;i < n;i++){ for(int j = 0;j < d;j++){ std::cout << centroids[i*d + j] << " "; } } std::cout << std::endl;*/ } } }
5219dac6beb36386bdb5b0b275180c62eae35ce5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define THREADS 256 #define BLOCKS 32 #define NUM THREADS*BLOCKS int seed_var =1239; __global__ void work_efficient_scan_kernel(int *X, int *Y, int InputSize) { extern __shared__ int XY[]; int i= blockIdx.x*blockDim.x+ threadIdx.x; if (i < InputSize) { XY[threadIdx.x] = X[i]; } for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x+1) * 2* stride -1; if (index < blockDim.x) { XY[index] += XY[index -stride]; } } for (int stride = THREADS/4; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x+1)*stride*2 -1; if(index + stride < THREADS) { XY[index + stride] += XY[index]; } } __syncthreads(); Y[i] = XY[threadIdx.x]; //OWN CODE __syncthreads(); if(threadIdx.x < blockIdx.x) { XY[threadIdx.x] = Y[threadIdx.x*blockDim.x + (blockDim.x-1)]; } __syncthreads(); for(unsigned int stride =0; stride < blockIdx.x; stride++) { Y[i] += XY[stride]; } __syncthreads(); }
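// Hypothetical host driver for the kernel above (this file defines only the
// kernel): it scans NUM ones with THREADS*sizeof(int) bytes of dynamic shared
// memory and compares against a sequential inclusive prefix sum. Whether the
// final cross-block pass reproduces that reference depends on block execution
// order, which this sketch does not attempt to enforce. Assumes <cstdio> and
// <cstdlib> are reachable through includes.h.
int run_scan_test() {
  int *h_X = (int *)malloc(NUM * sizeof(int));
  int *h_Y = (int *)malloc(NUM * sizeof(int));
  for (int i = 0; i < NUM; i++) h_X[i] = 1;

  int *d_X, *d_Y;
  hipMalloc((void **)&d_X, NUM * sizeof(int));
  hipMalloc((void **)&d_Y, NUM * sizeof(int));
  hipMemcpy(d_X, h_X, NUM * sizeof(int), hipMemcpyHostToDevice);

  hipLaunchKernelGGL(work_efficient_scan_kernel, dim3(BLOCKS), dim3(THREADS),
                     THREADS * sizeof(int), 0, d_X, d_Y, NUM);
  hipDeviceSynchronize();
  hipMemcpy(h_Y, d_Y, NUM * sizeof(int), hipMemcpyDeviceToHost);

  int running = 0, mismatches = 0;
  for (int i = 0; i < NUM; i++) {
    running += h_X[i];                    // CPU inclusive-scan reference
    if (h_Y[i] != running) mismatches++;
  }
  printf("scan mismatches: %d of %d\n", mismatches, NUM);

  hipFree(d_X); hipFree(d_Y); free(h_X); free(h_Y);
  return mismatches;
}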
5219dac6beb36386bdb5b0b275180c62eae35ce5.cu
#include "includes.h" #define THREADS 256 #define BLOCKS 32 #define NUM THREADS*BLOCKS int seed_var =1239; __global__ void work_efficient_scan_kernel(int *X, int *Y, int InputSize) { extern __shared__ int XY[]; int i= blockIdx.x*blockDim.x+ threadIdx.x; if (i < InputSize) { XY[threadIdx.x] = X[i]; } for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x+1) * 2* stride -1; if (index < blockDim.x) { XY[index] += XY[index -stride]; } } for (int stride = THREADS/4; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x+1)*stride*2 -1; if(index + stride < THREADS) { XY[index + stride] += XY[index]; } } __syncthreads(); Y[i] = XY[threadIdx.x]; //OWN CODE __syncthreads(); if(threadIdx.x < blockIdx.x) { XY[threadIdx.x] = Y[threadIdx.x*blockDim.x + (blockDim.x-1)]; } __syncthreads(); for(unsigned int stride =0; stride < blockIdx.x; stride++) { Y[i] += XY[stride]; } __syncthreads(); }
f5b5e1a2d2c97213da6efc74dc050ca6177f8e4d.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * Main entry of dense matrix-matrix multiplication kernel */ #include <malloc.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <iostream> #include <vector> #include "../FFT/parboil.h" #include "../benchmark_common.h" #include "sgemm_kernel.hip" // I/O routines extern bool readColMajorMatrixFile(const char* fn, int& nr_row, int& nr_col, std::vector<float>& v); extern bool writeColMajorMatrixFile(const char* fn, int, int, std::vector<float>&); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); // int // main (int argc, char *argv[]) { int main_MM(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { struct pb_TimerSet timers; float *dA, *dB, *dC; size_t A_sz, B_sz, C_sz; int matArow, matAcol; int matBrow, matBcol; std::vector<float> matA, matBT; pb_InitializeTimerSet(&timers); /* Read command line. Expect 3 inputs: A, B and B^T in column-major layout*/ /*params = pb_ReadParameters(); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] == NULL) || (params->inpFiles[2] == NULL) || (params->inpFiles[3] != NULL)) { fprintf(stderr, "Expecting three input filenames\n"); exit(-1); }*/ /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); // load A readColMajorMatrixFile((char*)"MM/matrix1.txt", matArow, matAcol, matA); // copy A to device memory A_sz = matArow * matAcol * sizeof(float); // load B^T readColMajorMatrixFile((char*)"MM/matrix2t.txt", matBcol, matBrow, matBT); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); B_sz = matBrow * matBcol * sizeof(float); // allocate space for C C_sz = matArow * matBcol * sizeof(float); // CUDA memory allocation std::vector<float> matC(matArow * matBcol); hipMalloc((void**)&dA, A_sz); hipMalloc((void**)&dB, B_sz); hipMalloc((void**)&dC, C_sz); // Copy A and B^T into device memory pb_SwitchToTimer(&timers, pb_TimerID_COPY); hipMemcpyAsync(dA, &matA.front(), A_sz, hipMemcpyHostToDevice, stream_app); hipMemcpyAsync(dB, &matBT.front(), B_sz, hipMemcpyHostToDevice, stream_app); pb_SwitchToTimer(&timers, pb_TimerID_GPU); std::cout << "flag = " << flag << std::endl; // Use standard sgemm interface regtileSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, dA, matArow, dB, matBcol, 0.0f, dC, matArow, stream_app, mutexapp, flag); std::cout << "kernel launch finishes" << std::endl; // if (params->outFile) { pb_SwitchToTimer(&timers, pb_TimerID_COPY); hipMemcpyAsync(&matC.front(), dC, C_sz, hipMemcpyDeviceToHost, stream_app); /* Write C to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); writeColMajorMatrixFile((char*)"MM/matrix3.txt", matArow, matBcol, matC); //} pb_SwitchToTimer(&timers, pb_TimerID_NONE); double GPUtime = pb_GetElapsedTime(&(timers.timers[pb_TimerID_GPU])); std::cout << "GFLOPs = " << 2. * matArow * matBcol * matAcol / GPUtime / 1e9 << std::endl; pb_PrintTimerSet(&timers); hipFree(dA); hipFree(dB); hipFree(dC); return 0; }
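// The GFLOPs figure printed by main_MM counts 2*matArow*matBcol*matAcol
// floating-point operations (one multiply and one add per inner-product
// term). A standalone check of that arithmetic with made-up sizes and timing:
static void print_gflops_example() {
  const double m = 1024, n = 1024, k = 1024; // example matrix dimensions
  const double seconds = 0.05;               // example GPU time
  std::cout << "GFLOPs = " << 2.0 * m * n * k / seconds / 1e9
            << std::endl;                    // ~42.9 for these numbers
}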
f5b5e1a2d2c97213da6efc74dc050ca6177f8e4d.cu
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * Main entry of dense matrix-matrix multiplication kernel */ #include <malloc.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <iostream> #include <vector> #include "../FFT/parboil.h" #include "../benchmark_common.h" #include "sgemm_kernel.cu" // I/O routines extern bool readColMajorMatrixFile(const char* fn, int& nr_row, int& nr_col, std::vector<float>& v); extern bool writeColMajorMatrixFile(const char* fn, int, int, std::vector<float>&); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); // int // main (int argc, char *argv[]) { int main_MM(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) { struct pb_TimerSet timers; float *dA, *dB, *dC; size_t A_sz, B_sz, C_sz; int matArow, matAcol; int matBrow, matBcol; std::vector<float> matA, matBT; pb_InitializeTimerSet(&timers); /* Read command line. Expect 3 inputs: A, B and B^T in column-major layout*/ /*params = pb_ReadParameters(); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] == NULL) || (params->inpFiles[2] == NULL) || (params->inpFiles[3] != NULL)) { fprintf(stderr, "Expecting three input filenames\n"); exit(-1); }*/ /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); // load A readColMajorMatrixFile((char*)"MM/matrix1.txt", matArow, matAcol, matA); // copy A to device memory A_sz = matArow * matAcol * sizeof(float); // load B^T readColMajorMatrixFile((char*)"MM/matrix2t.txt", matBcol, matBrow, matBT); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); B_sz = matBrow * matBcol * sizeof(float); // allocate space for C C_sz = matArow * matBcol * sizeof(float); // CUDA memory allocation std::vector<float> matC(matArow * matBcol); cudaMalloc((void**)&dA, A_sz); cudaMalloc((void**)&dB, B_sz); cudaMalloc((void**)&dC, C_sz); // Copy A and B^T into device memory pb_SwitchToTimer(&timers, pb_TimerID_COPY); cudaMemcpyAsync(dA, &matA.front(), A_sz, cudaMemcpyHostToDevice, stream_app); cudaMemcpyAsync(dB, &matBT.front(), B_sz, cudaMemcpyHostToDevice, stream_app); pb_SwitchToTimer(&timers, pb_TimerID_GPU); std::cout << "flag = " << flag << std::endl; // Use standard sgemm interface regtileSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, dA, matArow, dB, matBcol, 0.0f, dC, matArow, stream_app, mutexapp, flag); std::cout << "kernel launch finishes" << std::endl; // if (params->outFile) { pb_SwitchToTimer(&timers, pb_TimerID_COPY); cudaMemcpyAsync(&matC.front(), dC, C_sz, cudaMemcpyDeviceToHost, stream_app); /* Write C to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); writeColMajorMatrixFile((char*)"MM/matrix3.txt", matArow, matBcol, matC); //} pb_SwitchToTimer(&timers, pb_TimerID_NONE); double GPUtime = pb_GetElapsedTime(&(timers.timers[pb_TimerID_GPU])); std::cout << "GFLOPs = " << 2. * matArow * matBcol * matAcol / GPUtime / 1e9 << std::endl; pb_PrintTimerSet(&timers); cudaFree(dA); cudaFree(dB); cudaFree(dC); return 0; }
b5809e4601dada460720b65c517cc7fd6d26d232.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { hipDeviceProp_t prop; int whichDevice; HANDLE_ERROR( hipGetDevice( &whichDevice ) ); HANDLE_ERROR( hipGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } hipEvent_t start, stop; float elapsedTime; hipStream_t stream0, stream1; int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; // start the timers HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); // initialize the streams HANDLE_ERROR( hipStreamCreate( &stream0 ) ); HANDLE_ERROR( hipStreamCreate( &stream1 ) ); // allocate the memory on the GPU HANDLE_ERROR( hipMalloc( (void**)&dev_a0, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b0, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_c0, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_a1, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b1, N * sizeof(int) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_c1, N * sizeof(int) ) ); // allocate host locked memory, used to stream HANDLE_ERROR( hipHostMalloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) ); HANDLE_ERROR( hipHostMalloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) ); HANDLE_ERROR( hipHostMalloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } HANDLE_ERROR( hipEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { // copy the locked memory to the device, async HANDLE_ERROR( hipMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), hipMemcpyHostToDevice, stream0 ) ); HANDLE_ERROR( hipMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), hipMemcpyHostToDevice, stream0 ) ); hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream0, dev_a0, dev_b0, dev_c0 ); // copy the data from device to locked memory HANDLE_ERROR( hipMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0 ) ); // copy the locked memory to the device, async HANDLE_ERROR( hipMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) ); HANDLE_ERROR( hipMemcpyAsync( dev_b1, host_b+i+N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) ); hipLaunchKernelGGL(( kernel), dim3(N/256),dim3(256),0,stream1, dev_a1, dev_b1, dev_c1 ); // copy the data from 
device to locked memory HANDLE_ERROR( hipMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1 ) ); } HANDLE_ERROR( hipStreamSynchronize( stream0 ) ); HANDLE_ERROR( hipStreamSynchronize( stream1 ) ); HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory HANDLE_ERROR( hipHostFree( host_a ) ); HANDLE_ERROR( hipHostFree( host_b ) ); HANDLE_ERROR( hipHostFree( host_c ) ); HANDLE_ERROR( hipFree( dev_a0 ) ); HANDLE_ERROR( hipFree( dev_b0 ) ); HANDLE_ERROR( hipFree( dev_c0 ) ); HANDLE_ERROR( hipFree( dev_a1 ) ); HANDLE_ERROR( hipFree( dev_b1 ) ); HANDLE_ERROR( hipFree( dev_c1 ) ); HANDLE_ERROR( hipStreamDestroy( stream0 ) ); HANDLE_ERROR( hipStreamDestroy( stream1 ) ); return 0; }
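// A commonly used variation on the chunk loop above: enqueue the work
// breadth-first (both input copies, then both kernels, then both output
// copies) so that one stream's kernel does not sit between the other stream's
// copies on devices with a single copy queue. Only the loop body changes; the
// buffers, streams, and HANDLE_ERROR macro are the ones already declared in
// this file, and the overall result is unchanged.
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
    HANDLE_ERROR( hipMemcpyAsync( dev_a0, host_a + i,     N * sizeof(int), hipMemcpyHostToDevice, stream0 ) );
    HANDLE_ERROR( hipMemcpyAsync( dev_a1, host_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) );
    HANDLE_ERROR( hipMemcpyAsync( dev_b0, host_b + i,     N * sizeof(int), hipMemcpyHostToDevice, stream0 ) );
    HANDLE_ERROR( hipMemcpyAsync( dev_b1, host_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) );

    hipLaunchKernelGGL( kernel, dim3(N/256), dim3(256), 0, stream0, dev_a0, dev_b0, dev_c0 );
    hipLaunchKernelGGL( kernel, dim3(N/256), dim3(256), 0, stream1, dev_a1, dev_b1, dev_c1 );

    HANDLE_ERROR( hipMemcpyAsync( host_c + i,     dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0 ) );
    HANDLE_ERROR( hipMemcpyAsync( host_c + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1 ) );
}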
b5809e4601dada460720b65c517cc7fd6d26d232.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "../common/book.h" #define N (1024*1024) #define FULL_DATA_SIZE (N*20) __global__ void kernel( int *a, int *b, int *c ) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main( void ) { cudaDeviceProp prop; int whichDevice; HANDLE_ERROR( cudaGetDevice( &whichDevice ) ); HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) ); if (!prop.deviceOverlap) { printf( "Device will not handle overlaps, so no speed up from streams\n" ); return 0; } cudaEvent_t start, stop; float elapsedTime; cudaStream_t stream0, stream1; int *host_a, *host_b, *host_c; int *dev_a0, *dev_b0, *dev_c0; int *dev_a1, *dev_b1, *dev_c1; // start the timers HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); // initialize the streams HANDLE_ERROR( cudaStreamCreate( &stream0 ) ); HANDLE_ERROR( cudaStreamCreate( &stream1 ) ); // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c0, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_a1, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b1, N * sizeof(int) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_c1, N * sizeof(int) ) ); // allocate host locked memory, used to stream HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) ); for (int i=0; i<FULL_DATA_SIZE; i++) { host_a[i] = rand(); host_b[i] = rand(); } HANDLE_ERROR( cudaEventRecord( start, 0 ) ); // now loop over full data, in bite-sized chunks for (int i=0; i<FULL_DATA_SIZE; i+= N*2) { // copy the locked memory to the device, async HANDLE_ERROR( cudaMemcpyAsync( dev_a0, host_a+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_b0, host_b+i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) ); kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 ); // copy the data from device to locked memory HANDLE_ERROR( cudaMemcpyAsync( host_c+i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0 ) ); // copy the locked memory to the device, async HANDLE_ERROR( cudaMemcpyAsync( dev_a1, host_a+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); HANDLE_ERROR( cudaMemcpyAsync( dev_b1, host_b+i+N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) ); kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 ); // copy the data from device to locked memory HANDLE_ERROR( cudaMemcpyAsync( host_c+i+N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1 ) ); 
} HANDLE_ERROR( cudaStreamSynchronize( stream0 ) ); HANDLE_ERROR( cudaStreamSynchronize( stream1 ) ); HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); printf( "Time taken: %3.1f ms\n", elapsedTime ); // cleanup the streams and memory HANDLE_ERROR( cudaFreeHost( host_a ) ); HANDLE_ERROR( cudaFreeHost( host_b ) ); HANDLE_ERROR( cudaFreeHost( host_c ) ); HANDLE_ERROR( cudaFree( dev_a0 ) ); HANDLE_ERROR( cudaFree( dev_b0 ) ); HANDLE_ERROR( cudaFree( dev_c0 ) ); HANDLE_ERROR( cudaFree( dev_a1 ) ); HANDLE_ERROR( cudaFree( dev_b1 ) ); HANDLE_ERROR( cudaFree( dev_c1 ) ); HANDLE_ERROR( cudaStreamDestroy( stream0 ) ); HANDLE_ERROR( cudaStreamDestroy( stream1 ) ); return 0; }
3a2b747031e1fda14c2fa9db7442cc4b499066fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #include <assert.h> #include <cutil_inline.h> // #include <vector> //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel_h[100]; __constant__ float c_Kernel_v[100]; extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length){ hipMemcpyToSymbol(c_Kernel_h, h_Kernel, kernel_length * sizeof(float)); } extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length){ hipMemcpyToSymbol(c_Kernel_v, h_Kernel, kernel_length * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Constants //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 16 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 3 #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 16 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 3 //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, int kernel_radius ){ __shared__ float s_Data[ROWS_BLOCKDIM_Y] [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) s_Data[threadIdx.y] [threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; //Left halo for(int i = 0; i < ROWS_HALO_STEPS; i++){ s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Right halo for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++){ s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){ float sum = 0; #pragma unroll for(int j = -kernel_radius; j <= kernel_radius; j++) sum += c_Kernel_h[kernel_radius - j] * s_Data [threadIdx.y] [threadIdx.x + i * ROWS_BLOCKDIM_X + j]; d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } extern "C" void convolutionRowsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, int kernel_radius ){ assert( ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius ); //There is a rational division of the image into blocks assert( imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0 ); assert( imageH % ROWS_BLOCKDIM_Y == 0 ); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW, kernel_radius ); cutilCheckMsg("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, int kernel_radius ){ __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; //Upper halo for(int i = 0; i < COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; //Lower halo for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; //Compute and store results __syncthreads(); #pragma unroll for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){ float sum = 0; #pragma unroll for(int j = -kernel_radius; j <= kernel_radius; j++) sum += c_Kernel_v[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } extern "C" void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, int kernel_radius ){ assert( COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius ); assert( imageW % COLUMNS_BLOCKDIM_X == 0 ); assert( imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0 ); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0, d_Dst, d_Src, imageW, imageH, imageW, kernel_radius ); cutilCheckMsg("convolutionColumnsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Simple interface to compute a derivative //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Computes the higher eigenvalue of the hessian //////////////////////////////////////////////////////////////////////////////// __global__ void hessianKernel( float *d_output, float *d_gxx, float *d_gxy, float *d_gyy, float scale, int imageW, int imageH, int invert ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; float a, b, c; a = invert*d_gxx[i]; b = invert*d_gxy[i]; c = invert*d_gyy[i]; d_output[i] = ((a+c)/2 + sqrt( (a-c)*(a-c) + 4*b*b)/2)*scale*scale; // d_output[i] = (a-c)*(a-c) + 4*b*b; // d_output[i] = b; } extern "C" void hessianGPU ( float *d_output, float *d_gxx, float *d_gxy, float *d_gyy, float scale, int imageW, int imageH, int invert ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( hessianKernel), dim3(gird), dim3(block), 0, 0, d_output, d_gxx, d_gxy, d_gyy, scale, imageW, imageH, int invert ); cutilCheckMsg("hessianKernel() execution failed\n"); } //////////////// MAX ///////// __global__ void maxKernel( float *d_output, float *d_isMaxThanOutput, int imageW, int imageH ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; if(d_isMaxThanOutput[i] >= d_output[i]) d_output[i] = d_isMaxThanOutput[i]; } extern "C" void maxGPU ( float *d_output, float *d_isMaxThanOutput, int imageW, int imageH ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( maxKernel), dim3(gird), dim3(block), 0, 0, d_output, d_isMaxThanOutput, imageW, imageH ); cutilCheckMsg("maxKernel() execution failed\n"); } __global__ void maxKernel_scale( float *d_output, float *d_scale, float *d_isMaxThanOutput, float scale, int imageW, int imageH ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; if(d_isMaxThanOutput[i] > d_output[i]){ d_output[i] = d_isMaxThanOutput[i]; if(d_output[i] > 30) d_scale[i] = scale; } } extern "C" void maxGPU_scale ( float *d_output, float *d_scale, float *d_isMaxThanOutput, float scale, int imageW, int imageH ) { dim3 gird 
(ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( maxKernel_scale), dim3(gird), dim3(block), 0, 0, d_output, d_scale, d_isMaxThanOutput, scale, imageW, imageH ); cutilCheckMsg("maxKernel() execution failed\n"); } ////////////////////////// PUT VALUE __global__ void putKernel( float *d_output, float value, int imageW, int imageH ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; d_output[i] = value; } extern "C" void putGPU ( float *d_output, float value, int imageW, int imageH ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( putKernel), dim3(gird), dim3(block), 0, 0, d_output, value, imageW, imageH ); cutilCheckMsg("maxKernel() execution failed\n"); }
3a2b747031e1fda14c2fa9db7442cc4b499066fa.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #include <assert.h> #include <cutil_inline.h> // #include <vector> //////////////////////////////////////////////////////////////////////////////// // Convolution kernel storage //////////////////////////////////////////////////////////////////////////////// __constant__ float c_Kernel_h[100]; __constant__ float c_Kernel_v[100]; extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length){ cudaMemcpyToSymbol(c_Kernel_h, h_Kernel, kernel_length * sizeof(float)); } extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length){ cudaMemcpyToSymbol(c_Kernel_v, h_Kernel, kernel_length * sizeof(float)); } //////////////////////////////////////////////////////////////////////////////// // Constants //////////////////////////////////////////////////////////////////////////////// #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 16 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 3 #define COLUMNS_BLOCKDIM_X 16 #define COLUMNS_BLOCKDIM_Y 16 #define COLUMNS_RESULT_STEPS 8 #define COLUMNS_HALO_STEPS 3 //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionRowsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, int kernel_radius ){ __shared__ float s_Data[ROWS_BLOCKDIM_Y] [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) s_Data[threadIdx.y] [threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; //Left halo for(int i = 0; i < ROWS_HALO_STEPS; i++){ s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Right halo for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++){ s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Compute and store results __syncthreads(); #pragma unroll for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){ float sum = 0; #pragma unroll for(int j = -kernel_radius; j <= kernel_radius; j++) sum += c_Kernel_h[kernel_radius - j] * s_Data [threadIdx.y] [threadIdx.x + i * ROWS_BLOCKDIM_X + j]; d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } extern "C" void convolutionRowsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, int kernel_radius ){ assert( ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius ); //There is a rational division of the image into blocks assert( imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0 ); assert( imageH % ROWS_BLOCKDIM_Y == 0 ); dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); convolutionRowsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW, kernel_radius ); cutilCheckMsg("convolutionRowsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionColumnsKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, int kernel_radius ){ __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; //Main data #pragma unroll for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; //Upper halo for(int i = 0; i < COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; //Lower halo for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; //Compute and store results __syncthreads(); #pragma unroll for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){ float sum = 0; #pragma unroll for(int j = -kernel_radius; j <= kernel_radius; j++) sum += c_Kernel_v[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } extern "C" void convolutionColumnsGPU( float *d_Dst, float *d_Src, int imageW, int imageH, int kernel_radius ){ assert( COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius ); assert( imageW % COLUMNS_BLOCKDIM_X == 0 ); assert( imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0 ); dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); convolutionColumnsKernel<<<blocks, threads>>>( d_Dst, d_Src, imageW, imageH, imageW, kernel_radius ); cutilCheckMsg("convolutionColumnsKernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Simple interface to compute a derivative //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Computes the higher eigenvalue of the hessian //////////////////////////////////////////////////////////////////////////////// __global__ void hessianKernel( float *d_output, float *d_gxx, float *d_gxy, float *d_gyy, float scale, int imageW, int imageH, int invert ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; float a, b, c; a = invert*d_gxx[i]; b = invert*d_gxy[i]; c = invert*d_gyy[i]; d_output[i] = ((a+c)/2 + sqrt( (a-c)*(a-c) + 4*b*b)/2)*scale*scale; // d_output[i] = (a-c)*(a-c) + 4*b*b; // d_output[i] = b; } extern "C" void hessianGPU ( float *d_output, float *d_gxx, float *d_gxy, float *d_gyy, float scale, int imageW, int imageH, int invert ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); hessianKernel<<<gird, block>>>( d_output, d_gxx, d_gxy, d_gyy, scale, imageW, imageH, int invert ); cutilCheckMsg("hessianKernel() execution failed\n"); } //////////////// MAX ///////// __global__ void maxKernel( float *d_output, float *d_isMaxThanOutput, int imageW, int imageH ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; if(d_isMaxThanOutput[i] >= d_output[i]) d_output[i] = d_isMaxThanOutput[i]; } extern "C" void maxGPU ( float *d_output, float *d_isMaxThanOutput, int imageW, int imageH ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); maxKernel<<<gird, block>>>( d_output, d_isMaxThanOutput, imageW, imageH ); cutilCheckMsg("maxKernel() execution failed\n"); } __global__ void maxKernel_scale( float *d_output, float *d_scale, float *d_isMaxThanOutput, float scale, int imageW, int imageH ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; if(d_isMaxThanOutput[i] > d_output[i]){ d_output[i] = d_isMaxThanOutput[i]; if(d_output[i] > 30) d_scale[i] = scale; } } extern "C" void maxGPU_scale ( float *d_output, float *d_scale, float *d_isMaxThanOutput, float scale, int imageW, int imageH ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 
block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); maxKernel_scale<<<gird, block>>>( d_output, d_scale, d_isMaxThanOutput, scale, imageW, imageH ); cutilCheckMsg("maxKernel() execution failed\n"); } ////////////////////////// PUT VALUE __global__ void putKernel( float *d_output, float value, int imageW, int imageH ){ int i = (blockDim.y*blockIdx.y + threadIdx.y)*imageW + blockDim.x*blockIdx.x+threadIdx.x; d_output[i] = value; } extern "C" void putGPU ( float *d_output, float value, int imageW, int imageH ) { dim3 gird (ceil(float(imageW)/ROWS_BLOCKDIM_X),ceil(float(imageH)/ROWS_BLOCKDIM_Y)); dim3 block(ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y); putKernel<<<gird, block>>>( d_output, value, imageW, imageH ); cutilCheckMsg("maxKernel() execution failed\n"); }
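The pair above only ships the kernels and their extern "C" wrappers; a possible host-side driver is sketched below. It is not part of the original files: the kernel vectors g/dg/ddg (Gaussian, first and second derivative, all of the same odd length) and the function hessianResponseAtScale are assumptions, the image is assumed to satisfy the wrappers' divisibility asserts (width and height multiples of 128, radius at most 48), and the sketch presumes the stray `int` in front of `invert` inside hessianGPU's kernel launch is removed, since that call does not compile as written.

// Hypothetical host-side driver (not part of the original files). It assumes
// Gaussian (g), first-derivative (dg) and second-derivative (ddg) kernels of
// the same odd length, image dimensions that satisfy the wrappers' asserts
// (width and height multiples of 128), and a kernel radius of at most 48.
#include <vector>

extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length);
extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length);
extern "C" void convolutionRowsGPU(float *d_Dst, float *d_Src, int imageW, int imageH, int kernel_radius);
extern "C" void convolutionColumnsGPU(float *d_Dst, float *d_Src, int imageW, int imageH, int kernel_radius);
extern "C" void hessianGPU(float *d_output, float *d_gxx, float *d_gxy, float *d_gyy,
                           float scale, int imageW, int imageH, int invert);

// Filter the image into the three second-order responses gxx, gxy, gyy and
// reduce them to the larger Hessian eigenvalue per pixel at one scale.
void hessianResponseAtScale(float *d_img, float *d_tmp,
                            float *d_gxx, float *d_gxy, float *d_gyy, float *d_out,
                            std::vector<float> &g, std::vector<float> &dg, std::vector<float> &ddg,
                            float scale, int imageW, int imageH)
{
    const int r = (int)g.size() / 2;                       // shared kernel radius

    // gxx: second derivative along rows, Gaussian smoothing along columns
    setConvolutionKernel_horizontal(ddg.data(), (int)ddg.size());
    setConvolutionKernel_vertical(g.data(), (int)g.size());
    convolutionRowsGPU(d_tmp, d_img, imageW, imageH, r);
    convolutionColumnsGPU(d_gxx, d_tmp, imageW, imageH, r);

    // gyy: Gaussian smoothing along rows, second derivative along columns
    setConvolutionKernel_horizontal(g.data(), (int)g.size());
    setConvolutionKernel_vertical(ddg.data(), (int)ddg.size());
    convolutionRowsGPU(d_tmp, d_img, imageW, imageH, r);
    convolutionColumnsGPU(d_gyy, d_tmp, imageW, imageH, r);

    // gxy: first derivative along both axes
    setConvolutionKernel_horizontal(dg.data(), (int)dg.size());
    setConvolutionKernel_vertical(dg.data(), (int)dg.size());
    convolutionRowsGPU(d_tmp, d_img, imageW, imageH, r);
    convolutionColumnsGPU(d_gxy, d_tmp, imageW, imageH, r);

    // larger eigenvalue of [[gxx, gxy], [gxy, gyy]], scale-normalized
    hessianGPU(d_out, d_gxx, d_gxy, d_gyy, scale, imageW, imageH, /*invert=*/1);
}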
798d7b314ad0e76a22e67230e81a0d4a682cc1e6.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <stdio.h> #include <hip/hip_runtime.h> /* * This file includes simple demonstrations of a variety of shuffle * instructions. */ #define BDIMX 16 #define SEGM 4 void printData(int *in, const int size) { for (int i = 0; i < size; i++) { printf("%2d ", in[i]); } printf("\n"); } __global__ void test_shfl_broadcast(int *d_out, int *d_in, int const srcLane) { int value = d_in[threadIdx.x]; value = __shfl(value, srcLane, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_pattern (int *d_out, int *d_in, int *offset) { int value = d_in[threadIdx.x]; value = __shfl(value, offset[threadIdx.x], BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_wrap (int *d_out, int *d_in, int const offset) { int value = d_in[threadIdx.x]; value = __shfl(value, threadIdx.x + offset, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_wrap_plus (int *d_out, int *d_in, int const offset) { int value = d_in[threadIdx.x]; value += __shfl(value, threadIdx.x + offset, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_up(int *d_out, int *d_in, unsigned int const delta) { int value = d_in[threadIdx.x]; value = __shfl_up (value, delta, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_down(int *d_out, int *d_in, unsigned int const delta) { int value = d_in[threadIdx.x]; value = __shfl_down (value, delta, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_xor(int *d_out, int *d_in, int const mask) { int value = d_in[threadIdx.x]; value = __shfl_xor (value, mask, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_xor_array(int *d_out, int *d_in, int const mask) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; value[0] = __shfl_xor (value[0], mask, BDIMX); value[1] = __shfl_xor (value[1], mask, BDIMX); value[2] = __shfl_xor (value[2], mask, BDIMX); value[3] = __shfl_xor (value[3], mask, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_int4(int *d_out, int *d_in, int const mask) { int idx = threadIdx.x * SEGM; int4 value; value.x = d_in[idx]; value.y = d_in[idx + 1]; value.z = d_in[idx + 2]; value.w = d_in[idx + 3]; value.x = __shfl_xor (value.x, mask, BDIMX); value.y = __shfl_xor (value.y, mask, BDIMX); value.z = __shfl_xor (value.z, mask, BDIMX); value.w = __shfl_xor (value.w, mask, BDIMX); d_out[idx] = value.x; d_out[idx + 1] = value.y; d_out[idx + 2] = value.z; d_out[idx + 3] = value.w; } __global__ void test_shfl_xor_element(int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; value[srcIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_array_swap (int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; bool pred = ((threadIdx.x & 1) != mask); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __inline__ __device__ void swap_old(int *value, int tid, int mask, int srcIdx, int dstIdx) { 
bool pred = ((tid / mask + 1) == 1); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } } __inline__ __device__ void swap(int *value, int laneIdx, int mask, int firstIdx, int secondIdx) { bool pred = ((laneIdx / mask + 1) == 1); if (pred) { int tmp = value[firstIdx]; value[firstIdx] = value[secondIdx]; value[secondIdx] = tmp; } value[secondIdx] = __shfl_xor (value[secondIdx], mask, BDIMX); if (pred) { int tmp = value[firstIdx]; value[firstIdx] = value[secondIdx]; value[secondIdx] = tmp; } } __global__ void test_shfl_swap_old (int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; swap(value, threadIdx.x, mask, srcIdx, dstIdx); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_swap (int *d_out, int *d_in, int const mask, int firstIdx, int secondIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; swap(value, threadIdx.x, mask, firstIdx, secondIdx); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_array_swap_base (int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_array(int *d_out, int *d_in, int const offset) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; int lane = (offset + threadIdx.x) % SEGM; value[0] = __shfl (value[3], lane, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_plus(int *d_out, int *d_in, int const mask) { int value = d_in[threadIdx.x]; value += __shfl_xor (value, mask, BDIMX); d_out[threadIdx.x] = value; } int main(int argc, char **argv) { int dev = 0; bool iPrintout = 1; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("> %s Starting.", argv[0]); printf("at Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); int nElem = BDIMX; int h_inData[BDIMX], h_outData[BDIMX]; int h_pattern[] = {2,1,0,5,4,3,8,7,6,11,10,9,14,13,12,15}; for (int i = 0; i < nElem; i++) h_inData[i] = i; if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); printf("pattern\t\t: "); printData(h_pattern, nElem); } size_t nBytes = nElem * sizeof(int); int *d_inData, *d_outData; int *d_pattern; CHECK(hipMalloc((int**)&d_inData, nBytes)); CHECK(hipMalloc((int**)&d_outData, nBytes)); CHECK(hipMalloc((int**)&d_pattern, nBytes)); CHECK(hipMemcpy(d_inData, h_inData, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_pattern, h_pattern, nBytes, hipMemcpyHostToDevice)); int block = BDIMX; // shfl bcast hipLaunchKernelGGL(( test_shfl_broadcast), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl bcast\t\t: "); printData(h_outData, nElem); } // shfl offset hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, -2); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, 
hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl wrap right\t\t: "); printData(h_outData, nElem); } // shfl up hipLaunchKernelGGL(( test_shfl_up), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl up \t\t: "); printData(h_outData, nElem); } // shfl offset hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl wrap left\t\t: "); printData(h_outData, nElem); } // shfl offset hipLaunchKernelGGL(( test_shfl_wrap), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl wrap 2\t\t: "); printData(h_outData, nElem); } // shfl down hipLaunchKernelGGL(( test_shfl_down), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 2); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl down \t\t: "); printData(h_outData, nElem); } // shfl xor hipLaunchKernelGGL(( test_shfl_xor), dim3(1), dim3(block), 0, 0, d_outData, d_inData, 1); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl xor 1\t\t: "); printData(h_outData, nElem); } hipLaunchKernelGGL(( test_shfl_xor), dim3(1), dim3(block), 0, 0, d_outData, d_inData, -8); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl xor -1\t\t: "); printData(h_outData, nElem); } // shfl xor - int4 hipLaunchKernelGGL(( test_shfl_xor_int4), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl int4 1\t\t: "); printData(h_outData, nElem); } // shfl xor - register array hipLaunchKernelGGL(( test_shfl_xor_array), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl array 1\t\t: "); printData(h_outData, nElem); } // shfl xor - test_shfl_xor_element hipLaunchKernelGGL(( test_shfl_xor_element), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl idx \t\t: "); printData(h_outData, nElem); } // shfl xor - swap hipLaunchKernelGGL(( test_shfl_xor_array_swap_base), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl swap base\t\t: "); printData(h_outData, nElem); } // shfl xor - swap hipLaunchKernelGGL(( test_shfl_xor_array_swap), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl swap 0 3\t\t: 
"); printData(h_outData, nElem); } // shfl xor - swap hipLaunchKernelGGL(( test_shfl_swap), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1, 0, 3); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("shfl swap inline\t: "); printData(h_outData, nElem); } // shfl xor - register array hipLaunchKernelGGL(( test_shfl_array), dim3(1), dim3(block / SEGM), 0, 0, d_outData, d_inData, 1); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl array \t\t: "); printData(h_outData, nElem); } // shfl xor - register array hipLaunchKernelGGL(( test_shfl_pattern), dim3(1), dim3(block), 0, 0, d_outData, d_inData, d_pattern); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_outData, d_outData, nBytes, hipMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("pattern array \t\t: "); printData(h_outData, nElem); } // finishing CHECK(hipFree(d_inData)); CHECK(hipFree(d_outData)); CHECK(hipDeviceReset(); ); return EXIT_SUCCESS; }
798d7b314ad0e76a22e67230e81a0d4a682cc1e6.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> /* * This file includes simple demonstrations of a variety of shuffle * instructions. */ #define BDIMX 16 #define SEGM 4 void printData(int *in, const int size) { for (int i = 0; i < size; i++) { printf("%2d ", in[i]); } printf("\n"); } __global__ void test_shfl_broadcast(int *d_out, int *d_in, int const srcLane) { int value = d_in[threadIdx.x]; value = __shfl(value, srcLane, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_pattern (int *d_out, int *d_in, int *offset) { int value = d_in[threadIdx.x]; value = __shfl(value, offset[threadIdx.x], BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_wrap (int *d_out, int *d_in, int const offset) { int value = d_in[threadIdx.x]; value = __shfl(value, threadIdx.x + offset, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_wrap_plus (int *d_out, int *d_in, int const offset) { int value = d_in[threadIdx.x]; value += __shfl(value, threadIdx.x + offset, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_up(int *d_out, int *d_in, unsigned int const delta) { int value = d_in[threadIdx.x]; value = __shfl_up (value, delta, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_down(int *d_out, int *d_in, unsigned int const delta) { int value = d_in[threadIdx.x]; value = __shfl_down (value, delta, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_xor(int *d_out, int *d_in, int const mask) { int value = d_in[threadIdx.x]; value = __shfl_xor (value, mask, BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_xor_array(int *d_out, int *d_in, int const mask) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; value[0] = __shfl_xor (value[0], mask, BDIMX); value[1] = __shfl_xor (value[1], mask, BDIMX); value[2] = __shfl_xor (value[2], mask, BDIMX); value[3] = __shfl_xor (value[3], mask, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_int4(int *d_out, int *d_in, int const mask) { int idx = threadIdx.x * SEGM; int4 value; value.x = d_in[idx]; value.y = d_in[idx + 1]; value.z = d_in[idx + 2]; value.w = d_in[idx + 3]; value.x = __shfl_xor (value.x, mask, BDIMX); value.y = __shfl_xor (value.y, mask, BDIMX); value.z = __shfl_xor (value.z, mask, BDIMX); value.w = __shfl_xor (value.w, mask, BDIMX); d_out[idx] = value.x; d_out[idx + 1] = value.y; d_out[idx + 2] = value.z; d_out[idx + 3] = value.w; } __global__ void test_shfl_xor_element(int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; value[srcIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_array_swap (int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; bool pred = ((threadIdx.x & 1) != mask); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __inline__ __device__ void swap_old(int *value, int tid, int mask, int srcIdx, int dstIdx) { bool pred = ((tid / mask + 1) == 1); if (pred) { int tmp = 
value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); if (pred) { int tmp = value[srcIdx]; value[srcIdx] = value[dstIdx]; value[dstIdx] = tmp; } } __inline__ __device__ void swap(int *value, int laneIdx, int mask, int firstIdx, int secondIdx) { bool pred = ((laneIdx / mask + 1) == 1); if (pred) { int tmp = value[firstIdx]; value[firstIdx] = value[secondIdx]; value[secondIdx] = tmp; } value[secondIdx] = __shfl_xor (value[secondIdx], mask, BDIMX); if (pred) { int tmp = value[firstIdx]; value[firstIdx] = value[secondIdx]; value[secondIdx] = tmp; } } __global__ void test_shfl_swap_old (int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; swap(value, threadIdx.x, mask, srcIdx, dstIdx); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_swap (int *d_out, int *d_in, int const mask, int firstIdx, int secondIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; swap(value, threadIdx.x, mask, firstIdx, secondIdx); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_array_swap_base (int *d_out, int *d_in, int const mask, int srcIdx, int dstIdx) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; value[dstIdx] = __shfl_xor (value[dstIdx], mask, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_array(int *d_out, int *d_in, int const offset) { int idx = threadIdx.x * SEGM; int value[SEGM]; for (int i = 0; i < SEGM; i++) value[i] = d_in[idx + i]; int lane = (offset + threadIdx.x) % SEGM; value[0] = __shfl (value[3], lane, BDIMX); for (int i = 0; i < SEGM; i++) d_out[idx + i] = value[i]; } __global__ void test_shfl_xor_plus(int *d_out, int *d_in, int const mask) { int value = d_in[threadIdx.x]; value += __shfl_xor (value, mask, BDIMX); d_out[threadIdx.x] = value; } int main(int argc, char **argv) { int dev = 0; bool iPrintout = 1; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("> %s Starting.", argv[0]); printf("at Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); int nElem = BDIMX; int h_inData[BDIMX], h_outData[BDIMX]; int h_pattern[] = {2,1,0,5,4,3,8,7,6,11,10,9,14,13,12,15}; for (int i = 0; i < nElem; i++) h_inData[i] = i; if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); printf("pattern\t\t: "); printData(h_pattern, nElem); } size_t nBytes = nElem * sizeof(int); int *d_inData, *d_outData; int *d_pattern; CHECK(cudaMalloc((int**)&d_inData, nBytes)); CHECK(cudaMalloc((int**)&d_outData, nBytes)); CHECK(cudaMalloc((int**)&d_pattern, nBytes)); CHECK(cudaMemcpy(d_inData, h_inData, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_pattern, h_pattern, nBytes, cudaMemcpyHostToDevice)); int block = BDIMX; // shfl bcast test_shfl_broadcast<<<1, block>>>(d_outData, d_inData, 2); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl bcast\t\t: "); printData(h_outData, nElem); } // shfl offset test_shfl_wrap<<<1, block>>>(d_outData, d_inData, -2); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl wrap right\t\t: "); printData(h_outData, nElem); } // shfl up test_shfl_up<<<1, 
block>>>(d_outData, d_inData, 2); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl up \t\t: "); printData(h_outData, nElem); } // shfl offset test_shfl_wrap<<<1, block>>>(d_outData, d_inData, 2); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl wrap left\t\t: "); printData(h_outData, nElem); } // shfl offset test_shfl_wrap<<<1, block>>>(d_outData, d_inData, 2); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl wrap 2\t\t: "); printData(h_outData, nElem); } // shfl down test_shfl_down<<<1, block>>>(d_outData, d_inData, 2); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl down \t\t: "); printData(h_outData, nElem); } // shfl xor test_shfl_xor<<<1, block>>>(d_outData, d_inData, 1); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl xor 1\t\t: "); printData(h_outData, nElem); } test_shfl_xor<<<1, block>>>(d_outData, d_inData, -8); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl xor -1\t\t: "); printData(h_outData, nElem); } // shfl xor - int4 test_shfl_xor_int4<<<1, block / SEGM>>>(d_outData, d_inData, 1); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl int4 1\t\t: "); printData(h_outData, nElem); } // shfl xor - register array test_shfl_xor_array<<<1, block / SEGM>>>(d_outData, d_inData, 1); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl array 1\t\t: "); printData(h_outData, nElem); } // shfl xor - test_shfl_xor_element test_shfl_xor_element<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl idx \t\t: "); printData(h_outData, nElem); } // shfl xor - swap test_shfl_xor_array_swap_base<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl swap base\t\t: "); printData(h_outData, nElem); } // shfl xor - swap test_shfl_xor_array_swap<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl swap 0 3\t\t: "); printData(h_outData, nElem); } // shfl xor - swap test_shfl_swap<<<1, block / SEGM>>>(d_outData, d_inData, 1, 0, 3); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("shfl swap inline\t: "); printData(h_outData, nElem); } // shfl xor - register array test_shfl_array<<<1, block / SEGM>>>(d_outData, d_inData, 1); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { 
printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("shfl array \t\t: "); printData(h_outData, nElem); } // shfl xor - register array test_shfl_pattern<<<1, block>>>(d_outData, d_inData, d_pattern); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_outData, d_outData, nBytes, cudaMemcpyDeviceToHost)); if(iPrintout) { printf("initialData\t\t: "); printData(h_inData, nElem); } if(iPrintout) { printf("pattern array \t\t: "); printData(h_outData, nElem); } // finishing CHECK(cudaFree(d_inData)); CHECK(cudaFree(d_outData)); CHECK(cudaDeviceReset(); ); return EXIT_SUCCESS; }
ce60497259db163b8aa396f40ff219a76ad5e3aa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Copyright (c) 2020, Michael Kunz. All rights reserved.
// https://github.com/kunzmi/ImageStackAlignator
//
// This file is part of ImageStackAlignator.
//
// ImageStackAlignator is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, version 3.
//
// ImageStackAlignator is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
// MA 02110-1301 USA, http://www.gnu.org/licenses/.

//squared sum of a tile without the border
extern "C"

//Boxfilter ignoring the border parts
//blockDim.X must be tileSize + 2 * maxShift
//blockDim.Y must be 1
extern "C"

//Boxfilter ignoring the border parts
//blockDim.Y must be tileSize + 2 * maxShift
//blockDim.X must be 1
extern "C"

//Computed the normalized CC values out of the different input data
//Cross correlation is fft shifted
//blockDim.X must be 2 * maxShift
//blockDim.Y must be 2 * maxShift
//blockDim.Z must be nr of tiles
extern "C"

//Convert a tiled image into consecutive tiles for FFT
//input img has a pitch, output tiles are consecutive
//output tiles overlap by maxShift is filled by zero
extern "C"

//Convert a tiled image into consecutive tiles for FFT
//input img has a pitch, output tiles are consecutive
//output tiles overlap by maxShift on each side
extern "C"
__device__ float applysRGBGamma(float valIn)
{
    if (valIn <= 0.0031308f)
    {
        return 12.92f * valIn;
    }
    else
    {
        return (1.0f + 0.055f) * powf(valIn, 1.0f / 2.4f) - 0.055f;
    }
}

__global__ void GammasRGB(
    float3 * __restrict__ inOutImg,
    int imgWidth,
    int imgHeight,
    int imgPitch)
{
    int pxX = blockIdx.x * blockDim.x + threadIdx.x;
    int pxY = blockIdx.y * blockDim.y + threadIdx.y;

    if (pxX >= imgWidth || pxY >= imgHeight)
        return;

    float3 val = *(((float3*)((char*)inOutImg + imgPitch * pxY)) + pxX);

    //apply gamma:
    if (isnan(val.x)) val.x = 0;
    if (isnan(val.y)) val.y = 0;
    if (isnan(val.z)) val.z = 0;

    val.x = fmaxf(fminf(val.x, 1.0f), 0.0f);
    val.y = fmaxf(fminf(val.y, 1.0f), 0.0f);
    val.z = fmaxf(fminf(val.z, 1.0f), 0.0f);

    val.x = applysRGBGamma(val.x);
    val.y = applysRGBGamma(val.y);
    val.z = applysRGBGamma(val.z);

    *(((float3*)((char*)inOutImg + imgPitch * pxY)) + pxX) = val;
}
ce60497259db163b8aa396f40ff219a76ad5e3aa.cu
#include "includes.h" // Copyright (c) 2020, Michael Kunz. All rights reserved. // https://github.com/kunzmi/ImageStackAlignator // // This file is part of ImageStackAlignator. // // ImageStackAlignator is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as // published by the Free Software Foundation, version 3. // // ImageStackAlignator is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, // MA 02110-1301 USA, http://www.gnu.org/licenses/. //squared sum of a tile without the border extern "C" //Boxfilter ignoring the border parts //blockDim.X must be tileSize + 2 * maxShift //blockDim.Y must be 1 extern "C" //Boxfilter ignoring the border parts //blockDim.Y must be tileSize + 2 * maxShift //blockDim.X must be 1 extern "C" //Computed the normalized CC values out of the different input data //Cross correlation is fft shifted //blockDim.X must be 2 * maxShift //blockDim.Y must be 2 * maxShift //blockDim.Z must be nr of tiles extern "C" //Convert a tiled image into consecutive tiles for FFT //input img has a pitch, output tiles are consecutive //output tiles overlap by maxShift is filled by zero extern "C" //Convert a tiled image into consecutive tiles for FFT //input img has a pitch, output tiles are consecutive //output tiles overlap by maxShift on each side extern "C" __device__ float applysRGBGamma(float valIn) { if (valIn <= 0.0031308f) { return 12.92f * valIn; } else { return (1.0f + 0.055f) * powf(valIn, 1.0f / 2.4f) - 0.055f; } } __global__ void GammasRGB( float3 * __restrict__ inOutImg, int imgWidth, int imgHeight, int imgPitch) { int pxX = blockIdx.x * blockDim.x + threadIdx.x; int pxY = blockIdx.y * blockDim.y + threadIdx.y; if (pxX >= imgWidth || pxY >= imgHeight) return; float3 val = *(((float3*)((char*)inOutImg + imgPitch * pxY)) + pxX); //apply gamma: if (isnan(val.x)) val.x = 0; if (isnan(val.y)) val.y = 0; if (isnan(val.z)) val.z = 0; val.x = fmaxf(fminf(val.x, 1.0f), 0.0f); val.y = fmaxf(fminf(val.y, 1.0f), 0.0f); val.z = fmaxf(fminf(val.z, 1.0f), 0.0f); val.x = applysRGBGamma(val.x); val.y = applysRGBGamma(val.y); val.z = applysRGBGamma(val.z); *(((float3*)((char*)inOutImg + imgPitch * pxY)) + pxX) = val; }
188593b1843c790b1baf94dbabfc75fc475b3be8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "THHHalf.h" #include "THHHalfAutoNumerics.cuh" #include "SharedMem.cuh" template <typename T, typename AccumT> __global__ void cunn_SpatialLogSoftMax_updateOutput_kernel(T *output, T *input, int classSize, int height, int width) { int batchIndex = blockIdx.x; int index = threadIdx.x; while (index < height*width) { int y = index / width; int x = index % width; if (y >= height) break; // calculate input starting index in cuda layout (B x H x W x C) int inputStartIndex = (height*width*classSize)*batchIndex + (width*classSize)*y + (classSize)*x; AccumT sum = 0; for (int i = 0; i < classSize; i++) { sum += THCNumerics<T>::exp(input[inputStartIndex + i]); } sum = AccumT(1) / sum; for (int i = 0; i < classSize; i++) { // calculate output index in torch layout (B x C x H x W) int outputIndex = (classSize*height*width)*batchIndex + (height*width)*i + (width)*y + x; output[outputIndex] = ScalarConvert<AccumT, T>::to( THCNumerics<AccumT>::log(sum * THCNumerics<T>::exp(input[inputStartIndex + i]))); } index += blockDim.x; } } template <typename T, typename AccumT> __global__ void cunn_SpatialLogSoftMax_updateGradInput_kernel(T *gradInput, T *output, T *gradOutput, int classSize, int height, int width) { int batchIndex = blockIdx.x; int index = threadIdx.x; while (index < height*width) { int y = index / width; int x = index % width; if (y >= height) break; // calculate output starting index in cuda layout (B x H x W x C) int outputStartIndex = (height*width*classSize)*batchIndex + (width*classSize)*y + (classSize)*x; AccumT sum = 0; for (int i = 0; i < classSize; i++) { sum += gradOutput[outputStartIndex + i]; } for (int i = 0; i < classSize; i++) { // calculate input index in torch layout (B x C x H x W) int inputIndex = (classSize*height*width)*batchIndex + (height*width)*i + (width)*y + x; gradInput[inputIndex] = ScalarConvert<AccumT, T>::to( gradOutput[outputStartIndex + i] - THCNumerics<T>::exp(output[outputStartIndex + i]) * sum); } index += blockDim.x; } } template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return fmaxType(max, v); } }; template<typename T, typename AccumT> struct SumFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(T v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + THCNumerics<T>::exp(v - max_k); } const T max_k; }; template<typename AccumT> struct NoFinal { __device__ __forceinline__ AccumT operator()(AccumT v) const { return v; } }; template<typename AccumT> struct LSMFinal { __device__ __forceinline__ LSMFinal(AccumT m) : max_k(m) {} __device__ __forceinline__ AccumT operator()(AccumT v) const { return max_k + THCNumerics<AccumT>::log(v); } const AccumT max_k; }; template <template<typename, typename> class Reduction, template<typename> class Finalize, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT, AccumT>& r, AccumT defaultVal, const Finalize<AccumT>& f) { // To avoid RaW races from chaining blockReduce calls together, we // need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps if ((threadIdx.x / 32) == 0) 
// only threads in warp1 go into this (if) { int lane = threadIdx.x % 32; // from 0 to 31 // if less than 1024 threads per block, then only activate the relevant lanes if (lane < blockDim.x / 32) { #pragma unroll for (int i = 0; i < 32; ++i) { warpVal = r(warpVal, smem[lane * 32 + i]); } smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / 32; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = f(blockVal); } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT, AccumT>& r, AccumT defaultVal) { return blockReduce<Reduction, NoFinal, AccumT>(smem, val, r, defaultVal, NoFinal<AccumT>()); } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { AccumT threadVal = defaultVal; int offset = threadIdx.x; int last = size % (ILP * blockDim.x); // Body (unroll by ILP times) for (; offset < size - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmp[j] = data[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { threadVal = r(threadVal, tmp[j]); } } // Epilogue for (; offset < size; offset += blockDim.x) { threadVal = r(threadVal, data[offset]); } return threadVal; } template <int ILP, typename T, typename AccumT> __global__ void cunn_LogSoftMax_updateOutput_kernel(T *output, T *input, int classes) { SharedMem<AccumT> smem; AccumT *buffer = smem.getPointer(); // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; // find the max of the batch AccumT threadMax = ilpReduce<MaxFloat, ILP, T, AccumT>( input, classes, MaxFloat<T, AccumT>(), -THCNumerics<AccumT>::max()); // find the max over all batches AccumT max_k = blockReduce<MaxFloat, AccumT>( buffer, threadMax, MaxFloat<AccumT, AccumT>(), -THCNumerics<AccumT>::max()); T max_k_non_accum = ScalarConvert<AccumT, T>::to(max_k); AccumT threadExp = ilpReduce<SumExpFloat, ILP, T, AccumT>( input, classes, SumExpFloat<T, AccumT>(max_k_non_accum), AccumT(0)); T logsum_k = ScalarConvert<AccumT, T>::to( blockReduce<SumFloat, LSMFinal, AccumT>( buffer, threadExp, SumFloat<AccumT, AccumT>(), AccumT(0), LSMFinal<AccumT>(max_k))); // Output LSM (hand ILP) int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmp[j] = input[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { output[offset + j * blockDim.x] = tmp[j] - logsum_k; } } for (; offset < classes; offset += blockDim.x) { output[offset] = input[offset] - logsum_k; } } template <int ILP, typename T, typename AccumT> __global__ void cunn_LogSoftMax_updateGradInput_kernel(T *gradInput, T *output, T *gradOutput, int classes) { SharedMem<AccumT> smem; AccumT *buffer = smem.getPointer(); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; AccumT threadSum = ilpReduce<SumFloat, 4, T, AccumT>( gradOutput, classes, SumFloat<T, AccumT>(), AccumT(0)); T sum_k = ScalarConvert<AccumT, 
T>::to( blockReduce<SumFloat, AccumT>( buffer, threadSum, SumFloat<AccumT, AccumT>(), AccumT(0))); // Update gradInput (hand ILP) int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { T tmpGradOutput[ILP]; T tmpOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; tmpOutput[j] = output[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { gradInput[offset + j * blockDim.x] = tmpGradOutput[j] - THCNumerics<T>::exp(tmpOutput[j]) * sum_k; } } for (; offset < classes; offset += blockDim.x) { gradInput[offset] = gradOutput[offset] - THCNumerics<T>::exp(output[offset]) * sum_k; } } #include "generic/LogSoftMax.cu" #include "THHGenerateFloatTypes.h"
188593b1843c790b1baf94dbabfc75fc475b3be8.cu
#include "THCUNN.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include "SharedMem.cuh" template <typename T, typename AccumT> __global__ void cunn_SpatialLogSoftMax_updateOutput_kernel(T *output, T *input, int classSize, int height, int width) { int batchIndex = blockIdx.x; int index = threadIdx.x; while (index < height*width) { int y = index / width; int x = index % width; if (y >= height) break; // calculate input starting index in cuda layout (B x H x W x C) int inputStartIndex = (height*width*classSize)*batchIndex + (width*classSize)*y + (classSize)*x; AccumT sum = 0; for (int i = 0; i < classSize; i++) { sum += THCNumerics<T>::exp(input[inputStartIndex + i]); } sum = AccumT(1) / sum; for (int i = 0; i < classSize; i++) { // calculate output index in torch layout (B x C x H x W) int outputIndex = (classSize*height*width)*batchIndex + (height*width)*i + (width)*y + x; output[outputIndex] = ScalarConvert<AccumT, T>::to( THCNumerics<AccumT>::log(sum * THCNumerics<T>::exp(input[inputStartIndex + i]))); } index += blockDim.x; } } template <typename T, typename AccumT> __global__ void cunn_SpatialLogSoftMax_updateGradInput_kernel(T *gradInput, T *output, T *gradOutput, int classSize, int height, int width) { int batchIndex = blockIdx.x; int index = threadIdx.x; while (index < height*width) { int y = index / width; int x = index % width; if (y >= height) break; // calculate output starting index in cuda layout (B x H x W x C) int outputStartIndex = (height*width*classSize)*batchIndex + (width*classSize)*y + (classSize)*x; AccumT sum = 0; for (int i = 0; i < classSize; i++) { sum += gradOutput[outputStartIndex + i]; } for (int i = 0; i < classSize; i++) { // calculate input index in torch layout (B x C x H x W) int inputIndex = (classSize*height*width)*batchIndex + (height*width)*i + (width)*y + x; gradInput[inputIndex] = ScalarConvert<AccumT, T>::to( gradOutput[outputStartIndex + i] - THCNumerics<T>::exp(output[outputStartIndex + i]) * sum); } index += blockDim.x; } } template <typename T, typename AccumT> struct MaxFloat { __device__ __forceinline__ AccumT operator()(AccumT max, T v) const { return fmaxType(max, v); } }; template<typename T, typename AccumT> struct SumFloat { __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + v; } }; template<typename T, typename AccumT> struct SumExpFloat { __device__ __forceinline__ SumExpFloat(T v) : max_k(v) {} __device__ __forceinline__ AccumT operator()(AccumT sum, T v) const { return sum + THCNumerics<T>::exp(v - max_k); } const T max_k; }; template<typename AccumT> struct NoFinal { __device__ __forceinline__ AccumT operator()(AccumT v) const { return v; } }; template<typename AccumT> struct LSMFinal { __device__ __forceinline__ LSMFinal(AccumT m) : max_k(m) {} __device__ __forceinline__ AccumT operator()(AccumT v) const { return max_k + THCNumerics<AccumT>::log(v); } const AccumT max_k; }; template <template<typename, typename> class Reduction, template<typename> class Finalize, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT, AccumT>& r, AccumT defaultVal, const Finalize<AccumT>& f) { // To avoid RaW races from chaining blockReduce calls together, we // need a sync here __syncthreads(); smem[threadIdx.x] = val; __syncthreads(); AccumT warpVal = defaultVal; // First warp will perform per-warp reductions for the remaining warps if ((threadIdx.x / 32) == 0) // only threads in warp1 go into this (if) { int lane = threadIdx.x % 32; // from 0 to 
31 // if less than 1024 threads per block, then only activate the relevant lanes if (lane < blockDim.x / 32) { #pragma unroll for (int i = 0; i < 32; ++i) { warpVal = r(warpVal, smem[lane * 32 + i]); } smem[lane] = warpVal; } } __syncthreads(); // First thread will perform a reduction of the above per-warp reductions AccumT blockVal = defaultVal; if (threadIdx.x == 0) { for (int i = 0; i < blockDim.x / 32; ++i) { blockVal = r(blockVal, smem[i]); } smem[0] = f(blockVal); } // Sync and broadcast __syncthreads(); return smem[0]; } template <template<typename, typename> class Reduction, typename AccumT> __device__ __forceinline__ AccumT blockReduce(AccumT* smem, AccumT val, const Reduction<AccumT, AccumT>& r, AccumT defaultVal) { return blockReduce<Reduction, NoFinal, AccumT>(smem, val, r, defaultVal, NoFinal<AccumT>()); } template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT> __device__ __forceinline__ AccumT ilpReduce(T* data, int size, const Reduction<T, AccumT>& r, AccumT defaultVal) { AccumT threadVal = defaultVal; int offset = threadIdx.x; int last = size % (ILP * blockDim.x); // Body (unroll by ILP times) for (; offset < size - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmp[j] = data[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { threadVal = r(threadVal, tmp[j]); } } // Epilogue for (; offset < size; offset += blockDim.x) { threadVal = r(threadVal, data[offset]); } return threadVal; } template <int ILP, typename T, typename AccumT> __global__ void cunn_LogSoftMax_updateOutput_kernel(T *output, T *input, int classes) { SharedMem<AccumT> smem; AccumT *buffer = smem.getPointer(); // forward pointers to batch[blockIdx.x] // each block handles a sample in the mini-batch input += blockIdx.x * classes; output += blockIdx.x * classes; // find the max of the batch AccumT threadMax = ilpReduce<MaxFloat, ILP, T, AccumT>( input, classes, MaxFloat<T, AccumT>(), -THCNumerics<AccumT>::max()); // find the max over all batches AccumT max_k = blockReduce<MaxFloat, AccumT>( buffer, threadMax, MaxFloat<AccumT, AccumT>(), -THCNumerics<AccumT>::max()); T max_k_non_accum = ScalarConvert<AccumT, T>::to(max_k); AccumT threadExp = ilpReduce<SumExpFloat, ILP, T, AccumT>( input, classes, SumExpFloat<T, AccumT>(max_k_non_accum), AccumT(0)); T logsum_k = ScalarConvert<AccumT, T>::to( blockReduce<SumFloat, LSMFinal, AccumT>( buffer, threadExp, SumFloat<AccumT, AccumT>(), AccumT(0), LSMFinal<AccumT>(max_k))); // Output LSM (hand ILP) int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { T tmp[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmp[j] = input[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { output[offset + j * blockDim.x] = tmp[j] - logsum_k; } } for (; offset < classes; offset += blockDim.x) { output[offset] = input[offset] - logsum_k; } } template <int ILP, typename T, typename AccumT> __global__ void cunn_LogSoftMax_updateGradInput_kernel(T *gradInput, T *output, T *gradOutput, int classes) { SharedMem<AccumT> smem; AccumT *buffer = smem.getPointer(); gradInput += blockIdx.x * classes; output += blockIdx.x * classes; gradOutput += blockIdx.x * classes; AccumT threadSum = ilpReduce<SumFloat, 4, T, AccumT>( gradOutput, classes, SumFloat<T, AccumT>(), AccumT(0)); T sum_k = ScalarConvert<AccumT, T>::to( blockReduce<SumFloat, AccumT>( buffer, threadSum, SumFloat<AccumT, AccumT>(), 
AccumT(0))); // Update gradInput (hand ILP) int offset = threadIdx.x; int last = classes % (ILP * blockDim.x); for (; offset < classes - last; offset += blockDim.x * ILP) { T tmpGradOutput[ILP]; T tmpOutput[ILP]; #pragma unroll for (int j = 0; j < ILP; ++j) { tmpGradOutput[j] = gradOutput[offset + j * blockDim.x]; tmpOutput[j] = output[offset + j * blockDim.x]; } #pragma unroll for (int j = 0; j < ILP; ++j) { gradInput[offset + j * blockDim.x] = tmpGradOutput[j] - THCNumerics<T>::exp(tmpOutput[j]) * sum_k; } } for (; offset < classes; offset += blockDim.x) { gradInput[offset] = gradOutput[offset] - THCNumerics<T>::exp(output[offset]) * sum_k; } } #include "generic/LogSoftMax.cu" #include "THCGenerateFloatTypes.h"
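// --- Illustrative usage sketch (not part of the original file) --------------
// A minimal, hypothetical host-side launch for the batch-mode forward kernel
// above, assuming float data, ILP = 2, and one block per sample. The names
// d_out, d_in, batchSize and classes are illustrative, not symbols from this
// file. blockReduce() keeps one AccumT per thread in dynamic shared memory and
// its warp stage assumes blockDim.x is a multiple of 32 (and at most 1024), so
// the dynamic shared-memory size must be blockDim.x * sizeof(AccumT). In a
// hipified build the same launch would normally be spelled hipLaunchKernelGGL.
static void launchLogSoftMaxForwardSketch(float *d_out, float *d_in,
                                          int batchSize, int classes) {
  dim3 grid(batchSize);                  // one block per row of the (batch x classes) input
  dim3 block(1024);                      // multiple of 32, at most 1024
  size_t smem = block.x * sizeof(float); // one partial value per thread for blockReduce
  cunn_LogSoftMax_updateOutput_kernel<2, float, float>
      <<<grid, block, smem>>>(d_out, d_in, classes);
}
// -----------------------------------------------------------------------------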
513bd1bff5e1e12b4de09f011e5bd922496e7a70.hip
// !!! This is a file automatically generated by hipify!!! #include "main-pr.hpp" #define THROW_AWAY 1 #include "Padded2DArray.hpp" #include <omp.h> #include "memutils.hpp" #include <cmath> //#define SHOWLOADBALANCE #include "logged_array.hpp" //#define LOG #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cusparse_v2.h> #include <rocblas.h> #include "helper_cuda.h" #include "math.h" //#include "streamUtils.hpp" #include "tbb/concurrent_queue.h" #include "AdaptativeUtils.hpp" template <typename VertexType, typename EdgeType, typename Scalar> int main_pr(VertexType nVtx, EdgeType* xadj_, VertexType *adj_, Scalar* val_, Scalar *prior_, Scalar* pr_, Scalar lambda, int nTry, //algo parameter util::timestamp& totaltime, std::string& out ) { int nb_blocks = 0; int blk_size = 0; int nb_threads = 0; int WGSIZE = 256; { char* str = getenv ("NBTHREAD"); if (str) { std::stringstream ss (str); ss>>nb_threads; if (!ss) std::cerr<<"NBTHREAD invalid"<<std::endl; } } { char* str = getenv ("NBBLOCK"); if (str) { std::stringstream ss (str); ss>>nb_blocks; if (!ss) std::cerr<<"NBBLOCK invalid"<<std::endl; } } { char* str = getenv ("BLKSIZE"); if (str) { std::stringstream ss (str); ss>>blk_size; if (!ss) std::cerr<<"SUBSIZE invalid"<<std::endl; } } { char* str = getenv ("WGSIZE"); if (str) { std::stringstream ss (str); ss>>WGSIZE; if (!ss) std::cerr<<"val invalid"<<std::endl; } } if(nb_threads == 0 ){ std::cerr<<" NBTHREAD=??? "<<std::endl; exit(0); } if(blk_size == 0 ){ std::cerr<<" BLKSIZE=??? "<<std::endl; exit(0); } if(nb_blocks == 0 ){ std::cerr<<" NBBLOCK=??? "<<std::endl; exit(0); } bool coldcache = true; util::timestamp start(0,0); //cpuside variables Scalar* prin_ = new Scalar[nVtx]; EdgeType* xadj = xadj_; VertexType *adj = adj_; Scalar* val = val_; Scalar* prior = prior_; Scalar* prin = prin_; Scalar* prout = pr_; Scalar alpha = lambda; Scalar beta = 1-lambda; Scalar alpha1 = lambda; Scalar beta1 = 1-lambda; Scalar epsalpha = -1; Scalar *h_eps0; Scalar *h_eps1; //cuda side variable EdgeType* d_xadj0 ; VertexType *d_adj0 ; Scalar* d_val0 ; Scalar* d_prior0 ; Scalar* d_prin0 ; Scalar* d_prout0 ; Scalar *d_alpha0; Scalar *d_beta0; Scalar *d_epsalpha0; Scalar *d_eps0; EdgeType* d_xadj1 ; VertexType *d_adj1 ; Scalar* d_val1 ; Scalar* d_prior1 ; Scalar* d_prin1 ; Scalar* d_prout1 ; Scalar *d_alpha1; Scalar *d_beta1; Scalar *d_epsalpha1; Scalar *d_eps1; /* Get handle to the CUBLAS context */ hipSetDevice(0); hipblasHandle_t cublasHandle0 = 0; hipblasStatus_t cublasStatus0; cublasStatus0 = hipblasCreate(&cublasHandle0); hipblasSetPointerMode(cublasHandle0, HIPBLAS_POINTER_MODE_DEVICE); checkCudaErrors( hipSetDevice(1)); hipblasHandle_t cublasHandle1 = 0; hipblasStatus_t cublasStatus1; cublasStatus1 = hipblasCreate(&cublasHandle1); hipblasSetPointerMode(cublasHandle1, HIPBLAS_POINTER_MODE_DEVICE); /* Get handle to the CUSPARSE context */ hipSetDevice(0); hipsparseHandle_t cusparseHandle0 = 0; hipsparseStatus_t cusparseStatus0; cusparseStatus0 = hipsparseCreate(&cusparseHandle0); hipsparseMatDescr_t descr0 = 0; cusparseStatus0 = hipsparseCreateMatDescr(&descr0); hipsparseSetMatType(descr0,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr0,HIPSPARSE_INDEX_BASE_ZERO); hipSetDevice(1); hipsparseHandle_t cusparseHandle1 = 0; hipsparseStatus_t cusparseStatus1; cusparseStatus1 = hipsparseCreate(&cusparseHandle1); hipsparseMatDescr_t descr1 = 0; cusparseStatus1 = hipsparseCreateMatDescr(&descr1); hipsparseSetMatType(descr1,HIPSPARSE_MATRIX_TYPE_GENERAL); 
hipsparseSetMatIndexBase(descr1,HIPSPARSE_INDEX_BASE_ZERO); //cuda stream hipSetDevice(0); hipStream_t stream0; hipStreamCreate(&stream0); hipSetDevice(1); hipStream_t stream1; hipStreamCreate(&stream1); //cuda variable hipSetDevice(0); int *d_end; checkCudaErrors( hipMalloc((void**)&d_end, sizeof(*d_end)) ); hipDeviceEnablePeerAccess(1, 0); hipSetDevice(1); int *d_begin; checkCudaErrors( hipMalloc((void**)&d_begin, sizeof(*d_begin)) ); hipDeviceEnablePeerAccess(0, 0); //memalloc hipSetDevice(0); checkCudaErrors( hipMalloc((void**)&d_xadj0, (nVtx+1)*sizeof(*xadj)) ); checkCudaErrors( hipMalloc((void**)&d_adj0, (xadj[nVtx])*sizeof(*adj)) ); checkCudaErrors( hipMalloc((void**)&d_val0, (xadj[nVtx])*sizeof(*val)) ); checkCudaErrors( hipMalloc((void**)&d_prior0, (nVtx*sizeof(*prior)))); checkCudaErrors( hipMalloc((void**)&d_prin0, (nVtx*sizeof(*prin)) )); checkCudaErrors( hipMalloc((void**)&d_prout0, (nVtx*sizeof(*prout)) )); checkCudaErrors( hipMalloc((void**)&d_epsalpha0, (sizeof(epsalpha)) )); checkCudaErrors( hipHostMalloc((void**)&h_eps0, (sizeof(*h_eps0)) )); checkCudaErrors( hipMalloc((void**)&d_eps0, (sizeof(*h_eps0)) )); hipSetDevice(1); checkCudaErrors( hipMalloc((void**)&d_xadj1, (nVtx+1)*sizeof(*xadj)) ); checkCudaErrors( hipMalloc((void**)&d_adj1, (xadj[nVtx])*sizeof(*adj)) ); checkCudaErrors( hipMalloc((void**)&d_val1, (xadj[nVtx])*sizeof(*val)) ); checkCudaErrors( hipMalloc((void**)&d_prior1, (nVtx*sizeof(*prior)))); checkCudaErrors( hipMalloc((void**)&d_prin1, (nVtx*sizeof(*prin)) )); checkCudaErrors( hipMalloc((void**)&d_prout1, (nVtx*sizeof(*prout)) )); checkCudaErrors( hipMalloc((void**)&d_epsalpha1, (sizeof(epsalpha)) )); checkCudaErrors( hipHostMalloc((void**)&h_eps1, (sizeof(*h_eps1)) )); checkCudaErrors( hipMalloc((void**)&d_eps1, (sizeof(*h_eps1)) )); //cpu to gpu copies hipSetDevice(0); checkCudaErrors( hipMemcpy(d_xadj0, xadj, (nVtx+1)*sizeof(*xadj), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_adj0, adj, (xadj[nVtx])*sizeof(*adj), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_val0, val, (xadj[nVtx])*sizeof(*val), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_prior0, prior, nVtx*sizeof(*prior), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_epsalpha0, &epsalpha, sizeof(epsalpha), hipMemcpyHostToDevice) ); hipSetDevice(1); checkCudaErrors( hipMemcpy(d_xadj1, xadj, (nVtx+1)*sizeof(*xadj), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_adj1, adj, (xadj[nVtx])*sizeof(*adj), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_val1, val, (xadj[nVtx])*sizeof(*val), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_prior1, prior, nVtx*sizeof(*prior), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_epsalpha1, &epsalpha, sizeof(epsalpha), hipMemcpyHostToDevice) ); //tester PeerAcces int nRows = nVtx; unsigned long* rowBlocks; const int nThreadPerBlock = nb_threads; const unsigned int blkSize = blk_size; const unsigned int blkMultiplier = 3; const unsigned int rows_for_vector = 2; const bool allocate_row_blocks = true; //device 0 variable unsigned long* d_rowBlocks0; unsigned int* d_blkSize0; unsigned int* d_rows_for_vector0; unsigned int* d_blkMultiplier0; float* d_a0; float* d_b0; //device 1 variable unsigned long* d_rowBlocks1; unsigned int* d_blkSize1; unsigned int* d_rows_for_vector1; unsigned int* d_blkMultiplier1; float* d_a1; float* d_b1; int rowBlockSize1; int rowBlockSize2; //calculer rowBlockSize rowBlockSize1 = ComputeRowBlocksSize<int,int>(xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock); 
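// Row-block bookkeeping: ComputeRowBlocksSize returns how many 64-bit
// CSR-Adaptive row-block descriptors this matrix needs (rowBlocks is allocated
// with exactly that many unsigned long entries below). Each descriptor carries
// the starting row of its block in the upper 32 bits; that is what the
// (rowBlocks[i] >> (64-32)) & ((1UL << 32) - 1UL) decode further down recovers
// when translating the d_end/d_begin block counters back into row indices.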
//cout << "rowBlockSize1 : " << rowBlockSize1 << endl; //declarer rowBlocks rowBlocks = (unsigned long*) calloc(sizeof(unsigned long),rowBlockSize1); rowBlockSize2 = rowBlockSize1; //calculer rowBlocks ComputeRowBlocks<int,int>( rowBlocks, rowBlockSize2, xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock, allocate_row_blocks); //cout << "rowBlockSize2 : " << rowBlockSize2 <<endl; // int end = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL)); // cout << " end : " << end <<endl; cout << "rowBlockSize1 : " << rowBlockSize1 << endl; //malloc for device 0 variable hipSetDevice(0); checkCudaErrors( hipMalloc((void**)&d_rowBlocks0, (rowBlockSize1*sizeof(unsigned long)))); checkCudaErrors( hipMalloc((void**)&d_blkSize0, 1*sizeof(unsigned int))); checkCudaErrors( hipMalloc((void**)&d_rows_for_vector0,1*sizeof(unsigned int))); checkCudaErrors( hipMalloc((void**)&d_blkMultiplier0, 1*sizeof(unsigned int))); checkCudaErrors( hipMalloc((void**)&d_a0, 1*sizeof(float))); checkCudaErrors( hipMalloc((void**)&d_b0, 1*sizeof(float))); //malloc for device 1 variable hipSetDevice(1); checkCudaErrors( hipMalloc((void**)&d_rowBlocks1, (rowBlockSize1*sizeof(unsigned long)))); checkCudaErrors( hipMalloc((void**)&d_blkSize1, 1*sizeof(unsigned int))); checkCudaErrors( hipMalloc((void**)&d_rows_for_vector1,1*sizeof(unsigned int))); checkCudaErrors( hipMalloc((void**)&d_blkMultiplier1, 1*sizeof(unsigned int))); checkCudaErrors( hipMalloc((void**)&d_a1, 1*sizeof(float))); checkCudaErrors( hipMalloc((void**)&d_b1, 1*sizeof(float))); //send data to device 0 hipSetDevice(0); checkCudaErrors( hipMemcpy(d_rowBlocks0, rowBlocks, rowBlockSize1*sizeof(unsigned long), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_blkSize0, &blkSize, 1*sizeof(unsigned int), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_rows_for_vector0, &rows_for_vector, 1*sizeof(unsigned int), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_blkMultiplier0, &blkMultiplier, 1*sizeof(unsigned int), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_a0, &alpha, 1*sizeof(Scalar), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_b0, &beta, 1*sizeof(Scalar), hipMemcpyHostToDevice) ); //send data to device 1 hipSetDevice(1); checkCudaErrors( hipMemcpy(d_rowBlocks1, rowBlocks, rowBlockSize1*sizeof(unsigned long), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_blkSize1, &blkSize, 1*sizeof(unsigned int), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_rows_for_vector1, &rows_for_vector, 1*sizeof(unsigned int), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_blkMultiplier1, &blkMultiplier, 1*sizeof(unsigned int), hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_a1, &alpha, 1*sizeof(Scalar), hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_b1, &beta, 1*sizeof(Scalar), hipMemcpyHostToDevice) ); // prepar stream config list<Task> *tasks = new list<Task>; tbb::concurrent_bounded_queue<stream_container<int,int,float>* >* streams = new tbb::concurrent_bounded_queue<stream_container<int,int,float>* >; int mmshared_size = (blkSize + 1) * sizeof(float); // int nb_blocks = 512; int stream_number = 2; int X, subsize; X = (int) rowBlockSize1/(nb_blocks) ; if(X > 32){ if(X % 32 == 0){ subsize = X; }else{ X = X / 32 ; subsize = (X+1) * 32; } }else{ if(rowBlockSize1%2==0) subsize=rowBlockSize1/2; else subsize=(rowBlockSize1+1)/2; } cout << "nb_blocks=" << nb_blocks << " subsize=" << subsize << " rowBlockSize1=" << rowBlockSize1 << endl; cout << "start creat stream " <<endl; creat_stream_2gpus<int, 
int, float>(d_rowBlocks0, d_a0, d_b0, d_val0, d_xadj0, d_adj0, d_prin0, d_prout0, d_blkSize0, d_rows_for_vector0, d_blkMultiplier0, d_rowBlocks1, d_a1, d_b1, d_val1, d_xadj1, d_adj1, d_prin1, d_prout1, d_blkSize1, d_rows_for_vector1, d_blkMultiplier1, streams, stream_number ); cout << "end creat stream " <<endl; cout << "start split task " <<endl; int nb_tasks = split_input_to_tasks(rowBlocks, rowBlockSize1, subsize, *tasks); cout << "fin split task " << "nb_tasks=" << nb_tasks << endl; // nb_tasks--; int begin = rowBlockSize1; int end = 0; hipSetDevice(0); checkCudaErrors( hipMemcpy(d_end, &end, sizeof(*d_end), hipMemcpyHostToDevice) ); hipSetDevice(1); checkCudaErrors( hipMemcpy(d_begin, &begin, sizeof(*d_begin), hipMemcpyHostToDevice) ); int size = (blkSize) * sizeof(float); int medium; for (int TRY=0; TRY<THROW_AWAY+nTry; ++TRY) { if (TRY >= THROW_AWAY) start = util::timestamp(); int maxiter = 1; //medium = ((rowBlocks[mediumRowblocks] >> (64-32)) & ((1UL << 32) - 1UL)); //for GPU0 hipSetDevice(0); //setup prin hipMemcpyAsync(d_prin0, d_prior0, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice,stream0); //hipMemcpyAsync(d_prin0, d_prior0, (medium)*sizeof(*prior), hipMemcpyDeviceToDevice,stream0); hipSetDevice(1); //setup prin hipMemcpyAsync(d_prin1, d_prior1, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice,stream1); // hipMemcpyAsync(d_prin1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), hipMemcpyDeviceToDevice,stream1); hipSetDevice(1); checkCudaErrors( hipStreamSynchronize(stream1)); hipSetDevice(0); checkCudaErrors( hipStreamSynchronize(stream0)); begin = rowBlockSize1; end = 0; hipSetDevice(0); checkCudaErrors( hipMemcpy(d_end, &end, sizeof(*d_end), hipMemcpyHostToDevice) ); hipSetDevice(1); checkCudaErrors( hipMemcpy(d_begin, &begin, sizeof(*d_begin), hipMemcpyHostToDevice) ); for (int iter = 0; iter < maxiter ; iter++) { int top = 0; int bottom = nb_tasks; if(iter == 0 ){ hipSetDevice(0); //setup prin hipMemcpyAsync(d_prout0, d_prior0, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice,stream0); //hipMemcpyAsync(d_prin0, d_prior0, (medium)*sizeof(*prior), hipMemcpyDeviceToDevice,stream0); hipSetDevice(1); //setup prin hipMemcpyAsync(d_prout1, d_prior1, nVtx*sizeof(*prior), hipMemcpyDeviceToDevice,stream1); }else{ hipSetDevice(1); hipMemcpyAsync(d_prin1, d_prout0, (medium)*sizeof(*prior), hipMemcpyDeviceToDevice, stream1); hipSetDevice(0); hipMemcpyAsync(d_prin0+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prior), hipMemcpyDeviceToDevice, stream0); hipSetDevice(0); hipMemcpyAsync(d_prout0, d_prior0, (medium)*sizeof(*prior), hipMemcpyDeviceToDevice, stream0); hipSetDevice(1); hipMemcpyAsync(d_prout1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), hipMemcpyDeviceToDevice, stream1); } hipSetDevice(0); hipDeviceSynchronize(); hipSetDevice(1); hipDeviceSynchronize(); int iteration = 0; //apel des deux fonctions. 
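// i.e., launch the two cooperating per-device kernels below: csr_adaptativeGPU0
// appears to consume row blocks from the front (advancing *d_end from 0) while
// csr_adaptativeGPU1 consumes them from the back (moving *d_begin down from
// rowBlockSize1), so the point where the counters meet gives the dynamic work
// split between the two GPUs. Note that `medium`, which the residual and copy
// code below reads, is only set by the commented-out line above, so as written
// it is used uninitialized.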
hipSetDevice(0); hipLaunchKernelGGL(( csr_adaptativeGPU0), dim3(WGSIZE), dim3(nThreadPerBlock), mmshared_size, stream0 , d_end, d_begin, d_val0, d_adj0, d_xadj0, d_prin0, d_prout0, d_rowBlocks0, d_a0, d_b0, d_blkSize0, d_blkMultiplier0, d_rows_for_vector0, rowBlockSize1); cudaPrintError("after kernel0"); hipSetDevice(1); hipLaunchKernelGGL(( csr_adaptativeGPU1), dim3(WGSIZE), dim3(nThreadPerBlock), mmshared_size, stream1 , d_end, d_begin, d_val1, d_adj1, d_xadj1, d_prin1, d_prout1, d_rowBlocks1, d_a1, d_b1, d_blkSize1, d_blkMultiplier1, d_rows_for_vector1, rowBlockSize1); cudaPrintError("after kernel1"); //hipDeviceSynchronize(); checkCudaErrors( hipStreamSynchronize(stream1)); hipSetDevice(0); //hipDeviceSynchronize(); checkCudaErrors( hipStreamSynchronize(stream0)); // util::timestamp stop2; // cout << " totaltime="<< stop2 - start << endl; // cout << "medium=" << medium << endl; //compute epsilon //using prin to compute epsilon hipSetDevice(0); hipblasSetStream(cublasHandle0, stream0); hipblasSaxpy (cublasHandle0, medium, d_epsalpha0, d_prout0, 1, d_prin0, 1); // d_prin = d_prout*-1 + d_prin hipblasSasum (cublasHandle0, medium, d_prin0, 1, d_eps0); hipMemcpyAsync(h_eps0, d_eps0, sizeof(*d_eps0), hipMemcpyDeviceToHost, stream0); // hipMemcpyAsync(d_prin0, d_prout0, nVtx*sizeof(*prout), hipMemcpyDeviceToDevice, stream0);//prepare prin for next iteration //compute epsilon //using prin to compute epsilon hipSetDevice(1); hipblasSetStream(cublasHandle1, stream1); hipblasSaxpy (cublasHandle1, (nVtx-medium), d_epsalpha1, d_prout1+medium, 1, d_prin1+medium, 1); // d_prin = d_prout*-1 + d_prin hipblasSasum(cublasHandle1, nVtx-medium, d_prin1+medium, 1, d_eps1); hipMemcpyAsync(h_eps1, d_eps1, sizeof(*h_eps1), hipMemcpyDeviceToHost, stream1); // hipSetDevice(1); // hipMemcpyAsync(d_prin1+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prout), hipMemcpyDeviceToDevice,stream1);//prepare prin for next iteration // hipSetDevice(0); // hipMemcpyAsync(d_prin0, d_prout0, (medium)*sizeof(*prout), hipMemcpyDeviceToDevice, stream0);//prepare prin for next iteration hipSetDevice(1); checkCudaErrors( hipStreamSynchronize(stream1)); hipSetDevice(0); checkCudaErrors( hipStreamSynchronize(stream0)); //stopping condition // if (*h_eps0 +*h_eps1 < 0) // deactivited for testing purposes // iter = maxiter; std::cerr<<*h_eps0+*h_eps1<< " " ; } hipSetDevice(0); checkCudaErrors(hipMemcpy(&end, d_end, sizeof(*d_end), hipMemcpyDeviceToHost)); hipSetDevice(1); checkCudaErrors(hipMemcpy(&begin, d_begin, sizeof(*d_begin), hipMemcpyDeviceToHost)); int rowEnd = ((rowBlocks[end] >> (64-32)) & ((1UL << 32) - 1UL)); int rowBegin = ((rowBlocks[begin] >> (64-32)) & ((1UL << 32) - 1UL)); hipSetDevice(0); checkCudaErrors(hipMemcpy(prout, d_prout0, 1*sizeof(*prout), hipMemcpyDeviceToHost)); //hipSetDevice(1); //checkCudaErrors(hipMemcpy(prout+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prout), hipMemcpyDeviceToHost)); std::cerr<< " d_begin=" <<begin << "d_end=" << end << endl; std::cerr<< " row_begin=" << rowBegin << "row_end=" << rowEnd << endl; for(int i=0; i<1; i++) { std::cerr.precision(10); std::cerr<<"PR["<< i<< "]="<<prout[i]<<std::endl; } if (TRY >= THROW_AWAY) { util::timestamp stop; totaltime += stop - start; cout << "ws totaltime="<< stop - start << endl; } } hipSetDevice(0); hipDeviceReset(); hipSetDevice(1); hipDeviceReset(); delete[] prin_; { std::stringstream ss; ss<<"part1V: "<< medium <<" part1E: "<<xadj[medium+1] <<" part2V: "<<nVtx-(medium)<<" part2E: "<< xadj[nVtx] - xadj[medium+1]; out = ss.str(); } return 0; }
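// --- Illustrative sketch (not part of the original benchmark code) ----------
// The convergence test inside the loop above computes an L1 residual
// ||prin - prout||_1 with two BLAS calls: an axpy with a device-resident
// alpha of -1 (d_epsalpha) overwrites prin with (prin - prout), and an asum
// then reduces it into a device scalar. A minimal single-GPU version of that
// pattern, with hypothetical argument names and the handle assumed to be in
// HIPBLAS_POINTER_MODE_DEVICE:
static void l1_residual_sketch(hipblasHandle_t handle, int n,
                               const float* d_minus_one, // device scalar = -1.0f
                               const float* d_prout,
                               float* d_prin,            // overwritten with the difference
                               float* d_eps)             // device scalar result
{
    hipblasSaxpy(handle, n, d_minus_one, d_prout, 1, d_prin, 1); // prin -= prout
    hipblasSasum(handle, n, d_prin, 1, d_eps);                   // eps = ||prin||_1
}
// -----------------------------------------------------------------------------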
513bd1bff5e1e12b4de09f011e5bd922496e7a70.cu
#include "main-pr.hpp" #define THROW_AWAY 1 #include "Padded2DArray.hpp" #include <omp.h> #include "memutils.hpp" #include <cmath> //#define SHOWLOADBALANCE #include "logged_array.hpp" //#define LOG #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <cusparse_v2.h> #include <cublas_v2.h> #include "helper_cuda.h" #include "math.h" //#include "streamUtils.hpp" #include "tbb/concurrent_queue.h" #include "AdaptativeUtils.hpp" template <typename VertexType, typename EdgeType, typename Scalar> int main_pr(VertexType nVtx, EdgeType* xadj_, VertexType *adj_, Scalar* val_, Scalar *prior_, Scalar* pr_, Scalar lambda, int nTry, //algo parameter util::timestamp& totaltime, std::string& out ) { int nb_blocks = 0; int blk_size = 0; int nb_threads = 0; int WGSIZE = 256; { char* str = getenv ("NBTHREAD"); if (str) { std::stringstream ss (str); ss>>nb_threads; if (!ss) std::cerr<<"NBTHREAD invalid"<<std::endl; } } { char* str = getenv ("NBBLOCK"); if (str) { std::stringstream ss (str); ss>>nb_blocks; if (!ss) std::cerr<<"NBBLOCK invalid"<<std::endl; } } { char* str = getenv ("BLKSIZE"); if (str) { std::stringstream ss (str); ss>>blk_size; if (!ss) std::cerr<<"SUBSIZE invalid"<<std::endl; } } { char* str = getenv ("WGSIZE"); if (str) { std::stringstream ss (str); ss>>WGSIZE; if (!ss) std::cerr<<"val invalid"<<std::endl; } } if(nb_threads == 0 ){ std::cerr<<" NBTHREAD=??? "<<std::endl; exit(0); } if(blk_size == 0 ){ std::cerr<<" BLKSIZE=??? "<<std::endl; exit(0); } if(nb_blocks == 0 ){ std::cerr<<" NBBLOCK=??? "<<std::endl; exit(0); } bool coldcache = true; util::timestamp start(0,0); //cpuside variables Scalar* prin_ = new Scalar[nVtx]; EdgeType* xadj = xadj_; VertexType *adj = adj_; Scalar* val = val_; Scalar* prior = prior_; Scalar* prin = prin_; Scalar* prout = pr_; Scalar alpha = lambda; Scalar beta = 1-lambda; Scalar alpha1 = lambda; Scalar beta1 = 1-lambda; Scalar epsalpha = -1; Scalar *h_eps0; Scalar *h_eps1; //cuda side variable EdgeType* d_xadj0 ; VertexType *d_adj0 ; Scalar* d_val0 ; Scalar* d_prior0 ; Scalar* d_prin0 ; Scalar* d_prout0 ; Scalar *d_alpha0; Scalar *d_beta0; Scalar *d_epsalpha0; Scalar *d_eps0; EdgeType* d_xadj1 ; VertexType *d_adj1 ; Scalar* d_val1 ; Scalar* d_prior1 ; Scalar* d_prin1 ; Scalar* d_prout1 ; Scalar *d_alpha1; Scalar *d_beta1; Scalar *d_epsalpha1; Scalar *d_eps1; /* Get handle to the CUBLAS context */ cudaSetDevice(0); cublasHandle_t cublasHandle0 = 0; cublasStatus_t cublasStatus0; cublasStatus0 = cublasCreate(&cublasHandle0); cublasSetPointerMode(cublasHandle0, CUBLAS_POINTER_MODE_DEVICE); checkCudaErrors( cudaSetDevice(1)); cublasHandle_t cublasHandle1 = 0; cublasStatus_t cublasStatus1; cublasStatus1 = cublasCreate(&cublasHandle1); cublasSetPointerMode(cublasHandle1, CUBLAS_POINTER_MODE_DEVICE); /* Get handle to the CUSPARSE context */ cudaSetDevice(0); cusparseHandle_t cusparseHandle0 = 0; cusparseStatus_t cusparseStatus0; cusparseStatus0 = cusparseCreate(&cusparseHandle0); cusparseMatDescr_t descr0 = 0; cusparseStatus0 = cusparseCreateMatDescr(&descr0); cusparseSetMatType(descr0,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr0,CUSPARSE_INDEX_BASE_ZERO); cudaSetDevice(1); cusparseHandle_t cusparseHandle1 = 0; cusparseStatus_t cusparseStatus1; cusparseStatus1 = cusparseCreate(&cusparseHandle1); cusparseMatDescr_t descr1 = 0; cusparseStatus1 = cusparseCreateMatDescr(&descr1); cusparseSetMatType(descr1,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr1,CUSPARSE_INDEX_BASE_ZERO); //cuda stream cudaSetDevice(0); cudaStream_t stream0; 
cudaStreamCreate(&stream0); cudaSetDevice(1); cudaStream_t stream1; cudaStreamCreate(&stream1); //cuda variable cudaSetDevice(0); int *d_end; checkCudaErrors( cudaMalloc((void**)&d_end, sizeof(*d_end)) ); cudaDeviceEnablePeerAccess(1, 0); cudaSetDevice(1); int *d_begin; checkCudaErrors( cudaMalloc((void**)&d_begin, sizeof(*d_begin)) ); cudaDeviceEnablePeerAccess(0, 0); //memalloc cudaSetDevice(0); checkCudaErrors( cudaMalloc((void**)&d_xadj0, (nVtx+1)*sizeof(*xadj)) ); checkCudaErrors( cudaMalloc((void**)&d_adj0, (xadj[nVtx])*sizeof(*adj)) ); checkCudaErrors( cudaMalloc((void**)&d_val0, (xadj[nVtx])*sizeof(*val)) ); checkCudaErrors( cudaMalloc((void**)&d_prior0, (nVtx*sizeof(*prior)))); checkCudaErrors( cudaMalloc((void**)&d_prin0, (nVtx*sizeof(*prin)) )); checkCudaErrors( cudaMalloc((void**)&d_prout0, (nVtx*sizeof(*prout)) )); checkCudaErrors( cudaMalloc((void**)&d_epsalpha0, (sizeof(epsalpha)) )); checkCudaErrors( cudaMallocHost((void**)&h_eps0, (sizeof(*h_eps0)) )); checkCudaErrors( cudaMalloc((void**)&d_eps0, (sizeof(*h_eps0)) )); cudaSetDevice(1); checkCudaErrors( cudaMalloc((void**)&d_xadj1, (nVtx+1)*sizeof(*xadj)) ); checkCudaErrors( cudaMalloc((void**)&d_adj1, (xadj[nVtx])*sizeof(*adj)) ); checkCudaErrors( cudaMalloc((void**)&d_val1, (xadj[nVtx])*sizeof(*val)) ); checkCudaErrors( cudaMalloc((void**)&d_prior1, (nVtx*sizeof(*prior)))); checkCudaErrors( cudaMalloc((void**)&d_prin1, (nVtx*sizeof(*prin)) )); checkCudaErrors( cudaMalloc((void**)&d_prout1, (nVtx*sizeof(*prout)) )); checkCudaErrors( cudaMalloc((void**)&d_epsalpha1, (sizeof(epsalpha)) )); checkCudaErrors( cudaMallocHost((void**)&h_eps1, (sizeof(*h_eps1)) )); checkCudaErrors( cudaMalloc((void**)&d_eps1, (sizeof(*h_eps1)) )); //cpu to gpu copies cudaSetDevice(0); checkCudaErrors( cudaMemcpy(d_xadj0, xadj, (nVtx+1)*sizeof(*xadj), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_adj0, adj, (xadj[nVtx])*sizeof(*adj), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_val0, val, (xadj[nVtx])*sizeof(*val), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_prior0, prior, nVtx*sizeof(*prior), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_epsalpha0, &epsalpha, sizeof(epsalpha), cudaMemcpyHostToDevice) ); cudaSetDevice(1); checkCudaErrors( cudaMemcpy(d_xadj1, xadj, (nVtx+1)*sizeof(*xadj), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_adj1, adj, (xadj[nVtx])*sizeof(*adj), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_val1, val, (xadj[nVtx])*sizeof(*val), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_prior1, prior, nVtx*sizeof(*prior), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_epsalpha1, &epsalpha, sizeof(epsalpha), cudaMemcpyHostToDevice) ); //tester PeerAcces int nRows = nVtx; unsigned long* rowBlocks; const int nThreadPerBlock = nb_threads; const unsigned int blkSize = blk_size; const unsigned int blkMultiplier = 3; const unsigned int rows_for_vector = 2; const bool allocate_row_blocks = true; //device 0 variable unsigned long* d_rowBlocks0; unsigned int* d_blkSize0; unsigned int* d_rows_for_vector0; unsigned int* d_blkMultiplier0; float* d_a0; float* d_b0; //device 1 variable unsigned long* d_rowBlocks1; unsigned int* d_blkSize1; unsigned int* d_rows_for_vector1; unsigned int* d_blkMultiplier1; float* d_a1; float* d_b1; int rowBlockSize1; int rowBlockSize2; //calculer rowBlockSize rowBlockSize1 = ComputeRowBlocksSize<int,int>(xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock); //cout << "rowBlockSize1 : " << rowBlockSize1 << endl; 
//declarer rowBlocks rowBlocks = (unsigned long*) calloc(sizeof(unsigned long),rowBlockSize1); rowBlockSize2 = rowBlockSize1; //calculer rowBlocks ComputeRowBlocks<int,int>( rowBlocks, rowBlockSize2, xadj, nVtx, blkSize, blkMultiplier, rows_for_vector, nThreadPerBlock, allocate_row_blocks); //cout << "rowBlockSize2 : " << rowBlockSize2 <<endl; // int end = ((rowBlocks[rowBlockSize1] >> (64-32)) & ((1UL << 32) - 1UL)); // cout << " end : " << end <<endl; cout << "rowBlockSize1 : " << rowBlockSize1 << endl; //malloc for device 0 variable cudaSetDevice(0); checkCudaErrors( cudaMalloc((void**)&d_rowBlocks0, (rowBlockSize1*sizeof(unsigned long)))); checkCudaErrors( cudaMalloc((void**)&d_blkSize0, 1*sizeof(unsigned int))); checkCudaErrors( cudaMalloc((void**)&d_rows_for_vector0,1*sizeof(unsigned int))); checkCudaErrors( cudaMalloc((void**)&d_blkMultiplier0, 1*sizeof(unsigned int))); checkCudaErrors( cudaMalloc((void**)&d_a0, 1*sizeof(float))); checkCudaErrors( cudaMalloc((void**)&d_b0, 1*sizeof(float))); //malloc for device 1 variable cudaSetDevice(1); checkCudaErrors( cudaMalloc((void**)&d_rowBlocks1, (rowBlockSize1*sizeof(unsigned long)))); checkCudaErrors( cudaMalloc((void**)&d_blkSize1, 1*sizeof(unsigned int))); checkCudaErrors( cudaMalloc((void**)&d_rows_for_vector1,1*sizeof(unsigned int))); checkCudaErrors( cudaMalloc((void**)&d_blkMultiplier1, 1*sizeof(unsigned int))); checkCudaErrors( cudaMalloc((void**)&d_a1, 1*sizeof(float))); checkCudaErrors( cudaMalloc((void**)&d_b1, 1*sizeof(float))); //send data to device 0 cudaSetDevice(0); checkCudaErrors( cudaMemcpy(d_rowBlocks0, rowBlocks, rowBlockSize1*sizeof(unsigned long), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_blkSize0, &blkSize, 1*sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_rows_for_vector0, &rows_for_vector, 1*sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_blkMultiplier0, &blkMultiplier, 1*sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_a0, &alpha, 1*sizeof(Scalar), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_b0, &beta, 1*sizeof(Scalar), cudaMemcpyHostToDevice) ); //send data to device 1 cudaSetDevice(1); checkCudaErrors( cudaMemcpy(d_rowBlocks1, rowBlocks, rowBlockSize1*sizeof(unsigned long), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_blkSize1, &blkSize, 1*sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_rows_for_vector1, &rows_for_vector, 1*sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_blkMultiplier1, &blkMultiplier, 1*sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_a1, &alpha, 1*sizeof(Scalar), cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_b1, &beta, 1*sizeof(Scalar), cudaMemcpyHostToDevice) ); // prepar stream config list<Task> *tasks = new list<Task>; tbb::concurrent_bounded_queue<stream_container<int,int,float>* >* streams = new tbb::concurrent_bounded_queue<stream_container<int,int,float>* >; int mmshared_size = (blkSize + 1) * sizeof(float); // int nb_blocks = 512; int stream_number = 2; int X, subsize; X = (int) rowBlockSize1/(nb_blocks) ; if(X > 32){ if(X % 32 == 0){ subsize = X; }else{ X = X / 32 ; subsize = (X+1) * 32; } }else{ if(rowBlockSize1%2==0) subsize=rowBlockSize1/2; else subsize=(rowBlockSize1+1)/2; } cout << "nb_blocks=" << nb_blocks << " subsize=" << subsize << " rowBlockSize1=" << rowBlockSize1 << endl; cout << "start creat stream " <<endl; creat_stream_2gpus<int, int, 
float>(d_rowBlocks0, d_a0, d_b0, d_val0, d_xadj0, d_adj0, d_prin0, d_prout0, d_blkSize0, d_rows_for_vector0, d_blkMultiplier0, d_rowBlocks1, d_a1, d_b1, d_val1, d_xadj1, d_adj1, d_prin1, d_prout1, d_blkSize1, d_rows_for_vector1, d_blkMultiplier1, streams, stream_number ); cout << "end creat stream " <<endl; cout << "start split task " <<endl; int nb_tasks = split_input_to_tasks(rowBlocks, rowBlockSize1, subsize, *tasks); cout << "fin split task " << "nb_tasks=" << nb_tasks << endl; // nb_tasks--; int begin = rowBlockSize1; int end = 0; cudaSetDevice(0); checkCudaErrors( cudaMemcpy(d_end, &end, sizeof(*d_end), cudaMemcpyHostToDevice) ); cudaSetDevice(1); checkCudaErrors( cudaMemcpy(d_begin, &begin, sizeof(*d_begin), cudaMemcpyHostToDevice) ); int size = (blkSize) * sizeof(float); int medium; for (int TRY=0; TRY<THROW_AWAY+nTry; ++TRY) { if (TRY >= THROW_AWAY) start = util::timestamp(); int maxiter = 1; //medium = ((rowBlocks[mediumRowblocks] >> (64-32)) & ((1UL << 32) - 1UL)); //for GPU0 cudaSetDevice(0); //setup prin cudaMemcpyAsync(d_prin0, d_prior0, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice,stream0); //cudaMemcpyAsync(d_prin0, d_prior0, (medium)*sizeof(*prior), cudaMemcpyDeviceToDevice,stream0); cudaSetDevice(1); //setup prin cudaMemcpyAsync(d_prin1, d_prior1, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice,stream1); // cudaMemcpyAsync(d_prin1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), cudaMemcpyDeviceToDevice,stream1); cudaSetDevice(1); checkCudaErrors( cudaStreamSynchronize(stream1)); cudaSetDevice(0); checkCudaErrors( cudaStreamSynchronize(stream0)); begin = rowBlockSize1; end = 0; cudaSetDevice(0); checkCudaErrors( cudaMemcpy(d_end, &end, sizeof(*d_end), cudaMemcpyHostToDevice) ); cudaSetDevice(1); checkCudaErrors( cudaMemcpy(d_begin, &begin, sizeof(*d_begin), cudaMemcpyHostToDevice) ); for (int iter = 0; iter < maxiter ; iter++) { int top = 0; int bottom = nb_tasks; if(iter == 0 ){ cudaSetDevice(0); //setup prin cudaMemcpyAsync(d_prout0, d_prior0, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice,stream0); //cudaMemcpyAsync(d_prin0, d_prior0, (medium)*sizeof(*prior), cudaMemcpyDeviceToDevice,stream0); cudaSetDevice(1); //setup prin cudaMemcpyAsync(d_prout1, d_prior1, nVtx*sizeof(*prior), cudaMemcpyDeviceToDevice,stream1); }else{ cudaSetDevice(1); cudaMemcpyAsync(d_prin1, d_prout0, (medium)*sizeof(*prior), cudaMemcpyDeviceToDevice, stream1); cudaSetDevice(0); cudaMemcpyAsync(d_prin0+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prior), cudaMemcpyDeviceToDevice, stream0); cudaSetDevice(0); cudaMemcpyAsync(d_prout0, d_prior0, (medium)*sizeof(*prior), cudaMemcpyDeviceToDevice, stream0); cudaSetDevice(1); cudaMemcpyAsync(d_prout1+medium, d_prior1+medium, (nVtx-medium)*sizeof(*prior), cudaMemcpyDeviceToDevice, stream1); } cudaSetDevice(0); cudaDeviceSynchronize(); cudaSetDevice(1); cudaDeviceSynchronize(); int iteration = 0; //apel des deux fonctions. 
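// i.e., launch the two cooperating per-device kernels below: GPU0 appears to
// walk the row blocks from the front (advancing *d_end) and GPU1 from the back
// (moving *d_begin down), so their meeting point gives the dynamic work split.
// Note that `medium` is read below even though its assignment above is
// commented out.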
cudaSetDevice(0); csr_adaptativeGPU0<<< WGSIZE, nThreadPerBlock, mmshared_size, stream0 >>>(d_end, d_begin, d_val0, d_adj0, d_xadj0, d_prin0, d_prout0, d_rowBlocks0, d_a0, d_b0, d_blkSize0, d_blkMultiplier0, d_rows_for_vector0, rowBlockSize1); cudaPrintError("after kernel0"); cudaSetDevice(1); csr_adaptativeGPU1<<< WGSIZE, nThreadPerBlock, mmshared_size, stream1 >>>(d_end, d_begin, d_val1, d_adj1, d_xadj1, d_prin1, d_prout1, d_rowBlocks1, d_a1, d_b1, d_blkSize1, d_blkMultiplier1, d_rows_for_vector1, rowBlockSize1); cudaPrintError("after kernel1"); //cudaDeviceSynchronize(); checkCudaErrors( cudaStreamSynchronize(stream1)); cudaSetDevice(0); //cudaDeviceSynchronize(); checkCudaErrors( cudaStreamSynchronize(stream0)); // util::timestamp stop2; // cout << " totaltime="<< stop2 - start << endl; // cout << "medium=" << medium << endl; //compute epsilon //using prin to compute epsilon cudaSetDevice(0); cublasSetStream(cublasHandle0, stream0); cublasSaxpy (cublasHandle0, medium, d_epsalpha0, d_prout0, 1, d_prin0, 1); // d_prin = d_prout*-1 + d_prin cublasSasum (cublasHandle0, medium, d_prin0, 1, d_eps0); cudaMemcpyAsync(h_eps0, d_eps0, sizeof(*d_eps0), cudaMemcpyDeviceToHost, stream0); // cudaMemcpyAsync(d_prin0, d_prout0, nVtx*sizeof(*prout), cudaMemcpyDeviceToDevice, stream0);//prepare prin for next iteration //compute epsilon //using prin to compute epsilon cudaSetDevice(1); cublasSetStream(cublasHandle1, stream1); cublasSaxpy (cublasHandle1, (nVtx-medium), d_epsalpha1, d_prout1+medium, 1, d_prin1+medium, 1); // d_prin = d_prout*-1 + d_prin cublasSasum(cublasHandle1, nVtx-medium, d_prin1+medium, 1, d_eps1); cudaMemcpyAsync(h_eps1, d_eps1, sizeof(*h_eps1), cudaMemcpyDeviceToHost, stream1); // cudaSetDevice(1); // cudaMemcpyAsync(d_prin1+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prout), cudaMemcpyDeviceToDevice,stream1);//prepare prin for next iteration // cudaSetDevice(0); // cudaMemcpyAsync(d_prin0, d_prout0, (medium)*sizeof(*prout), cudaMemcpyDeviceToDevice, stream0);//prepare prin for next iteration cudaSetDevice(1); checkCudaErrors( cudaStreamSynchronize(stream1)); cudaSetDevice(0); checkCudaErrors( cudaStreamSynchronize(stream0)); //stopping condition // if (*h_eps0 +*h_eps1 < 0) // deactivited for testing purposes // iter = maxiter; std::cerr<<*h_eps0+*h_eps1<< " " ; } cudaSetDevice(0); checkCudaErrors(cudaMemcpy(&end, d_end, sizeof(*d_end), cudaMemcpyDeviceToHost)); cudaSetDevice(1); checkCudaErrors(cudaMemcpy(&begin, d_begin, sizeof(*d_begin), cudaMemcpyDeviceToHost)); int rowEnd = ((rowBlocks[end] >> (64-32)) & ((1UL << 32) - 1UL)); int rowBegin = ((rowBlocks[begin] >> (64-32)) & ((1UL << 32) - 1UL)); cudaSetDevice(0); checkCudaErrors(cudaMemcpy(prout, d_prout0, 1*sizeof(*prout), cudaMemcpyDeviceToHost)); //cudaSetDevice(1); //checkCudaErrors(cudaMemcpy(prout+medium, d_prout1+medium, (nVtx-medium)*sizeof(*prout), cudaMemcpyDeviceToHost)); std::cerr<< " d_begin=" <<begin << "d_end=" << end << endl; std::cerr<< " row_begin=" << rowBegin << "row_end=" << rowEnd << endl; for(int i=0; i<1; i++) { std::cerr.precision(10); std::cerr<<"PR["<< i<< "]="<<prout[i]<<std::endl; } if (TRY >= THROW_AWAY) { util::timestamp stop; totaltime += stop - start; cout << "ws totaltime="<< stop - start << endl; } } cudaSetDevice(0); cudaDeviceReset(); cudaSetDevice(1); cudaDeviceReset(); delete[] prin_; { std::stringstream ss; ss<<"part1V: "<< medium <<" part1E: "<<xadj[medium+1] <<" part2V: "<<nVtx-(medium)<<" part2E: "<< xadj[nVtx] - xadj[medium+1]; out = ss.str(); } return 0; }
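// --- Illustrative helper (not part of the original benchmark code) ----------
// The row index packed into a 64-bit CSR-Adaptive row-block descriptor is
// recovered above with `(rowBlocks[i] >> (64-32)) & ((1UL << 32) - 1UL)`;
// a small hypothetical helper that names that decode step:
static inline int rowBlockStartRow(unsigned long rowBlockEntry)
{
    return (int)((rowBlockEntry >> (64 - 32)) & ((1UL << 32) - 1UL));
}
// -----------------------------------------------------------------------------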
ef852ce47f5b84175d39cc159bb039dd55b08cbf.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <err.h> #include <errno.h> #include <fcntl.h> #include <functional> #include <math.h> #include <memory> #include <random> #include <stdint.h> #include <stdio.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include <cblas.h> #include <algorithm> #include <numeric> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <cudnn.h> using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ hipError_t err = (f); \ if (err != hipSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ hipGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ hipblasStatus_t stat = (f); \ if (stat != HIPBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3, int outputStride0, int outputStride1, int outputStride2, int outputStride3, const int dimSize, const int concatDim, int linearIndex) { int offset = 0; int curDimSize = 3 == concatDim ? dimSize : outputSize3; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStride3; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 2 == concatDim ? dimSize : outputSize2; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride2; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 1 == concatDim ? dimSize : outputSize1; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride1; offset += curDimOffset; linearIndex = nextDimIndex; return offset + linearIndex * outputStride0; // for (int i = 3; i >= 1; i--) { // int curDimSize = i == concatDim ? 
dimSize : outputSize[i]; // int nextDimIndex = linearIndex / curDimSize; // int curDimIndex = linearIndex - curDimSize * nextDimIndex; // int curDimOffset = curDimIndex * outputStride[i]; // offset += curDimOffset; // linearIndex = nextDimIndex; // } // return offset + linearIndex * outputStride[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; tid += stride; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; tid += stride; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; 
tid < scalarCount; tid += stride) { int linearIndex = tid; int xindex0 = linearIndex / xstrides0; linearIndex = linearIndex - xstrides0 * xindex0; int xindex1 = linearIndex / xstrides1; linearIndex = linearIndex - xstrides1 * xindex1; int xindex2 = linearIndex / xstrides2; int xindex3 = linearIndex - xstrides2 * xindex2; if (xindex3 >= mask[xindex0]) in[tid] = 0; } } __global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { out[tid] = in1[tid] * in2[tid % in2ScalarCount]; } } __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } } // From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh // Result of div/mod operation stored together. template <typename Value> struct DivMod { Value div, mod; __host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { } }; // Base case: we only have an implementation for uint32_t for now. For // everything else, we use plain division. template <typename Value> struct IntDivider { IntDivider() { } // Dummy constructor for arrays. IntDivider(Value d) : divisor(d) { } __host__ __device__ inline Value div(Value n) const { return n / divisor; } __host__ __device__ inline Value mod(Value n) const { return n % divisor; } __host__ __device__ inline DivMod<Value> divmod(Value n) const { return DivMod<Value>(n / divisor, n % divisor); } Value divisor; }; // Implement fast integer division. template <> struct IntDivider<unsigned int> { static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int."); IntDivider() { } // Dummy constructor for arrays. IntDivider(unsigned int d) : divisor(d) { assert(divisor >= 1 && divisor <= INT32_MAX); // TODO: gcc/clang has __builtin_clz() but it's not portable. for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break; uint64_t one = 1; uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1; m1 = magic; assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits. } __host__ __device__ inline unsigned int div(unsigned int n) const { #ifdef __CUDA_ARCH__ // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and // 'm1'. unsigned int t = __umulhi(n, m1); return (t + n) >> shift; #else // Using uint64_t so that the addition does not overflow. uint64_t t = ((uint64_t) n * m1) >> 32; return (t + n) >> shift; #endif } __host__ __device__ inline unsigned int mod(unsigned int n) const { return n - div(n) * divisor; } __host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const { unsigned int q = div(n); return DivMod<unsigned int>(q, n - q * divisor); } unsigned int divisor; // d above. unsigned int m1; // Magic number: m' above. unsigned int shift; // Shift amounts. }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh /// OffsetCalculator calculates the offset in bytes of a linear index for NARGS /// operands that share the same shape, but may have different strides. 
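/// get(linearIndex) peels off one dimension per iteration with divmod: the
/// remainder indexes the current (fastest-varying) dimension and is scaled by
/// each operand's stride for that dimension, while the quotient carries over
/// to the next dimension. For example, with dims = 2 and sizes = {W, H}, a
/// linear index i yields x = i % W and y = i / W, and operand `arg` receives
/// the offset x * strides_[0][arg] + y * strides_[1][arg] (in the same units
/// as the strides that were passed in).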
template <int NARGS> struct OffsetCalculator { static constexpr int MAX_DIMS = 25; // The offset for each argument (in bytes). Wrapper around fixed-size array. struct offsets_t { __host__ __device__ uint32_t& operator[](int idx) { return values[idx]; } uint32_t values[NARGS]; }; // OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) { OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) { for (int i = 0; i < MAX_DIMS; ++i) { if (i < dims) { sizes_[i] = IntDivider<uint32_t>(sizes[i]); } else { sizes_[i] = IntDivider<uint32_t>(1); } for (int arg = 0; arg < NARGS; arg++) { strides_[i][arg] = i < dims ? strides[arg][i] : 0; } } } __host__ __device__ offsets_t get(uint32_t linear_idx) const { offsets_t offsets; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] = 0; } #pragma unroll for (int dim = 0; dim < MAX_DIMS; ++dim) { if (dim == dims) { break; } auto divmod = sizes_[dim].divmod(linear_idx); linear_idx = divmod.div; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] += divmod.mod * strides_[dim][arg]; } } return offsets; } void print() { for (auto i = 1; i < 128; i++) { auto offsets = get(i); printf("offsets[%d]: ", i); for (auto arg = 0; arg < NARGS; arg++) { printf("%d ", offsets[arg]); } printf("\n"); } } int dims; IntDivider<uint32_t> sizes_[MAX_DIMS]; uint32_t strides_[MAX_DIMS][NARGS]; }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh template<int nt, int vt, typename func_t> __launch_bounds__(nt, 4) __global__ void elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); hipLaunchKernelGGL(( elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, 0, N, f); } template<typename func_t> void gpu_unary_kernel(float *res, float *x, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<2> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in = &x[offsets[1]]; *out = f(*in); }); } template<typename func_t> void gpu_binary_kernel(float *res, float *x, float *y, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<3> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in1 = &x[offsets[1]]; float* in2 = &y[offsets[2]]; *out = f(*in1, *in2); }); } #define CUDNN_CALL(f) { \ cudnnStatus_t stat = (f); \ if (stat != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void Snippet(char *); std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0, 0.01}; int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: query <filename>\n"); return 0; } Snippet(argv[1]); return 0; } /***************************************** Emitting C Generated Code *******************************************/ #include <stdio.h> 
#include <stdlib.h> #include <string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. hipblasHandle_t cublasHandle; CUBLAS_CALL(hipblasCreate(&cublasHandle)); CUDA_CALL(hipMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(hipMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); srand(42); struct timeval begin_0, end_0, diff_0; gettimeofday(&begin_0, NULL); int32_t x7 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0); int64_t x8 = fsize(x7); int64_t x10 = x8 / 3073LL; int32_t x11 = (int32_t)x10; int32_t x12 = x11 * 3072; float* x13 = (float*)myMalloc(x12 * sizeof(float));; int* x14 = (int32_t*)myMalloc(x11 * sizeof(int32_t));; char* x9 = (char*)mmap(0, x8, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x7, 0); for(int x16=0; x16 < x11; x16++) { int32_t x17 = x16 * 3073; char x18 = x9[x17]; int32_t x19 = (int32_t)(unsigned char)x18; x14[x16] = x19; int32_t x25 = x17 + 1; int32_t x23 = x16 * 3072; for(int x22=0; x22 < 3072; x22++) { int32_t x26 = x25 + x22; char x27 = x9[x26]; int32_t x24 = x23 + x22; float x28 = (float)(unsigned char)x27; float x29 = x28 / 255.0f; x13[x24] = x29; } } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x37 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); float x38 = (float)x37; float x39 = x38 / 1000000.0f; printf("Data normalized (all prepare time) in %lf sec\n",x39); // Tensor 'toGPU' invocation. float* x313 = (float*)myGpuMalloc(262144 * sizeof(float)); int32_t x42 = open("/home/fei/bitbucket/Lantern/src/out/PLDI19evaluation/resnet50/resnet50.onnx.bin",0); int64_t x43 = fsize(x42); float* x44 = (float*)mmap(0, x43, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x42, 0); float* x45 = x44+5205440; CUDA_CALL(hipMemcpy(x313, x45, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x316 = (float*)myGpuMalloc(256 * sizeof(float)); float* x46 = x44+148672; CUDA_CALL(hipMemcpy(x316, x46, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x319 = (float*)myGpuMalloc(128 * sizeof(float)); float* x47 = x44+816064; CUDA_CALL(hipMemcpy(x319, x47, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x322 = (float*)myGpuMalloc(128 * sizeof(float)); float* x48 = x44+950080; CUDA_CALL(hipMemcpy(x322, x48, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x325 = (float*)myGpuMalloc(64 * sizeof(float)); float* x49 = x44+94784; CUDA_CALL(hipMemcpy(x325, x49, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x328 = (float*)myGpuMalloc(32768 * sizeof(float)); float* x50 = x44+220608; CUDA_CALL(hipMemcpy(x328, x50, 32768 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x331 = (float*)myGpuMalloc(512 * sizeof(float)); float* x51 = x44+22495680; CUDA_CALL(hipMemcpy(x331, x51, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x334 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x52 = x44+2964928; CUDA_CALL(hipMemcpy(x334, x52, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x337 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x53 = x44+4348352; CUDA_CALL(hipMemcpy(x337, x53, 589824 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
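// Note on the repeated "Tensor 'toGPU' invocation" blocks: each one follows the
// same three-step pattern. myGpuMalloc reserves a device buffer (presumably by
// bumping gpuMallocAddr within the hipMemset-zeroed heap set up above), a host
// pointer of the form x44 + <offset> selects one parameter tensor inside the
// mmap'ed resnet50.onnx.bin blob, and hipMemcpy(..., hipMemcpyHostToDevice)
// copies that many floats onto the GPU. The buffer sizes (64, 128, 256, ... up
// to a few million floats) appear to correspond to the individual convolution,
// batch-norm, and fully-connected parameters of ResNet50.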
float* x340 = (float*)myGpuMalloc(512 * sizeof(float)); float* x54 = x44+20133312; CUDA_CALL(hipMemcpy(x340, x54, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x343 = (float*)myGpuMalloc(256 * sizeof(float)); float* x55 = x44+2169536; CUDA_CALL(hipMemcpy(x343, x55, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x346 = (float*)myGpuMalloc(128 * sizeof(float)); float* x56 = x44+668224; CUDA_CALL(hipMemcpy(x346, x56, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x349 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x57 = x44+2432448; CUDA_CALL(hipMemcpy(x349, x57, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x352 = (float*)myGpuMalloc(512 * sizeof(float)); float* x58 = x44+1446336; CUDA_CALL(hipMemcpy(x352, x58, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x355 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x59 = x44+4081088; CUDA_CALL(hipMemcpy(x355, x59, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x358 = (float*)myGpuMalloc(256 * sizeof(float)); float* x60 = x44+1578688; CUDA_CALL(hipMemcpy(x358, x60, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x361 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x61 = x44+6325696; CUDA_CALL(hipMemcpy(x361, x61, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x364 = (float*)myGpuMalloc(512 * sizeof(float)); float* x62 = x44+602048; CUDA_CALL(hipMemcpy(x364, x62, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x367 = (float*)myGpuMalloc(64 * sizeof(float)); float* x63 = x44+165888; CUDA_CALL(hipMemcpy(x367, x63, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x370 = (float*)myGpuMalloc(512 * sizeof(float)); float* x64 = x44+1164736; CUDA_CALL(hipMemcpy(x370, x64, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x373 = (float*)myGpuMalloc(64 * sizeof(float)); float* x65 = x44+6080; CUDA_CALL(hipMemcpy(x373, x65, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x376 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x66 = x44+253888; CUDA_CALL(hipMemcpy(x376, x66, 147456 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x379 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x67 = x44+20135360; CUDA_CALL(hipMemcpy(x379, x67, 2359296 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x382 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x68 = x44+2960832; CUDA_CALL(hipMemcpy(x382, x68, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x385 = (float*)myGpuMalloc(256 * sizeof(float)); float* x69 = x44+3227072; CUDA_CALL(hipMemcpy(x385, x69, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x388 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x70 = x44+3228096; CUDA_CALL(hipMemcpy(x388, x70, 589824 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x391 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x71 = x44+43456; CUDA_CALL(hipMemcpy(x391, x71, 16384 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x394 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x72 = x44+22496704; CUDA_CALL(hipMemcpy(x394, x72, 1048576 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x397 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x73 = x44+9092544; CUDA_CALL(hipMemcpy(x397, x73, 2359296 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x400 = (float*)myGpuMalloc(128 * sizeof(float)); float* x74 = x44+816320; CUDA_CALL(hipMemcpy(x400, x74, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x403 = (float*)myGpuMalloc(256 * sizeof(float)); float* x75 = x44+60608; CUDA_CALL(hipMemcpy(x403, x75, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x406 = (float*)myGpuMalloc(256 * sizeof(float)); float* x76 = x44+219584; CUDA_CALL(hipMemcpy(x406, x76, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x409 = (float*)myGpuMalloc(128 * sizeof(float)); float* x77 = x44+1379392; CUDA_CALL(hipMemcpy(x409, x77, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x412 = (float*)myGpuMalloc(128 * sizeof(float)); float* x78 = x44+1231296; CUDA_CALL(hipMemcpy(x412, x78, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x415 = (float*)myGpuMalloc(64 * sizeof(float)); float* x79 = x44+1856; CUDA_CALL(hipMemcpy(x415, x79, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x418 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x80 = x44+1098176; CUDA_CALL(hipMemcpy(x418, x80, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x421 = (float*)myGpuMalloc(512 * sizeof(float)); float* x81 = x44+601536; CUDA_CALL(hipMemcpy(x421, x81, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x424 = (float*)myGpuMalloc(128 * sizeof(float)); float* x82 = x44+401728; CUDA_CALL(hipMemcpy(x424, x82, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x427 = (float*)myGpuMalloc(64 * sizeof(float)); float* x83 = x44+131904; CUDA_CALL(hipMemcpy(x427, x83, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x430 = (float*)myGpuMalloc(128 * sizeof(float)); float* x84 = x44+949696; CUDA_CALL(hipMemcpy(x430, x84, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x433 = (float*)myGpuMalloc(512 * sizeof(float)); float* x85 = x44+15664576; CUDA_CALL(hipMemcpy(x433, x85, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x436 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x86 = x44+18027968; CUDA_CALL(hipMemcpy(x436, x86, 1048576 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x439 = (float*)myGpuMalloc(10 * sizeof(float)); float* x87 = x44+23573952; CUDA_CALL(hipMemcpy(x439, x87, 10 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x442 = (float*)myGpuMalloc(64 * sizeof(float)); float* x88 = x44+43264; CUDA_CALL(hipMemcpy(x442, x88, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x445 = (float*)myGpuMalloc(512 * sizeof(float)); float* x89 = x44+11453376; CUDA_CALL(hipMemcpy(x445, x89, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x448 = (float*)myGpuMalloc(64 * sizeof(float)); float* x90 = x44+6272; CUDA_CALL(hipMemcpy(x448, x90, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x451 = (float*)myGpuMalloc(512 * sizeof(float)); float* x91 = x44+882112; CUDA_CALL(hipMemcpy(x451, x91, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x454 = (float*)myGpuMalloc(64 * sizeof(float)); float* x92 = x44+6144; CUDA_CALL(hipMemcpy(x454, x92, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x457 = (float*)myGpuMalloc(512 * sizeof(float)); float* x93 = x44+1445824; CUDA_CALL(hipMemcpy(x457, x93, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x460 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x94 = x44+1379776; CUDA_CALL(hipMemcpy(x460, x94, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x463 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x95 = x44+3818944; CUDA_CALL(hipMemcpy(x463, x95, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x466 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x96 = x44+5202368; CUDA_CALL(hipMemcpy(x466, x96, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x469 = (float*)myGpuMalloc(256 * sizeof(float)); float* x97 = x44+148416; CUDA_CALL(hipMemcpy(x469, x97, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x472 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x98 = x44+7441856; CUDA_CALL(hipMemcpy(x472, x98, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x475 = (float*)myGpuMalloc(64 * sizeof(float)); float* x99 = x44+94720; CUDA_CALL(hipMemcpy(x475, x99, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x478 = (float*)myGpuMalloc(128 * sizeof(float)); float* x100 = x44+1097792; CUDA_CALL(hipMemcpy(x478, x100, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x481 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x101 = x44+12504512; CUDA_CALL(hipMemcpy(x481, x101, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x484 = (float*)myGpuMalloc(256 * sizeof(float)); float* x102 = x44+4938944; CUDA_CALL(hipMemcpy(x484, x102, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x487 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x103 = x44+14611904; CUDA_CALL(hipMemcpy(x487, x103, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x490 = (float*)myGpuMalloc(512 * sizeof(float)); float* x104 = x44+15666112; CUDA_CALL(hipMemcpy(x490, x104, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x493 = (float*)myGpuMalloc(512 * sizeof(float)); float* x105 = x44+18026432; CUDA_CALL(hipMemcpy(x493, x105, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x496 = (float*)myGpuMalloc(512 * sizeof(float)); float* x106 = x44+9091520; CUDA_CALL(hipMemcpy(x496, x106, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x499 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x107 = x44+19080640; CUDA_CALL(hipMemcpy(x499, x107, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x502 = (float*)myGpuMalloc(256 * sizeof(float)); float* x108 = x44+6588608; CUDA_CALL(hipMemcpy(x502, x108, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x505 = (float*)myGpuMalloc(256 * sizeof(float)); float* x109 = x44+8299456; CUDA_CALL(hipMemcpy(x505, x109, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x508 = (float*)myGpuMalloc(256 * sizeof(float)); float* x110 = x44+60352; CUDA_CALL(hipMemcpy(x508, x110, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x511 = (float*)myGpuMalloc(64 * sizeof(float)); float* x111 = x44+202944; CUDA_CALL(hipMemcpy(x511, x111, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x514 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x112 = x44+166080; CUDA_CALL(hipMemcpy(x514, x112, 36864 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x517 = (float*)myGpuMalloc(256 * sizeof(float)); float* x113 = x44+6058432; CUDA_CALL(hipMemcpy(x517, x113, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x520 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x114 = x44+2436544; CUDA_CALL(hipMemcpy(x520, x114, 524288 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x523 = (float*)myGpuMalloc(256 * sizeof(float)); float* x115 = x44+77248; CUDA_CALL(hipMemcpy(x523, x115, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x526 = (float*)myGpuMalloc(256 * sizeof(float)); float* x116 = x44+6587840; CUDA_CALL(hipMemcpy(x526, x116, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x529 = (float*)myGpuMalloc(512 * sizeof(float)); float* x117 = x44+20133824; CUDA_CALL(hipMemcpy(x529, x117, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x532 = (float*)myGpuMalloc(128 * sizeof(float)); float* x118 = x44+1379264; CUDA_CALL(hipMemcpy(x532, x118, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x535 = (float*)myGpuMalloc(256 * sizeof(float)); float* x119 = x44+7708608; CUDA_CALL(hipMemcpy(x535, x119, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x538 = (float*)myGpuMalloc(64 * sizeof(float)); float* x120 = x44+165824; CUDA_CALL(hipMemcpy(x538, x120, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x541 = (float*)myGpuMalloc(512 * sizeof(float)); float* x121 = x44+1164224; CUDA_CALL(hipMemcpy(x541, x121, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x544 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x122 = x44+94912; CUDA_CALL(hipMemcpy(x544, x122, 36864 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x547 = (float*)myGpuMalloc(128 * sizeof(float)); float* x123 = x44+253376; CUDA_CALL(hipMemcpy(x547, x123, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x550 = (float*)myGpuMalloc(256 * sizeof(float)); float* x124 = x44+7708096; CUDA_CALL(hipMemcpy(x550, x124, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x553 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x125 = x44+2962880; CUDA_CALL(hipMemcpy(x553, x125, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x556 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x126 = x44+203200; CUDA_CALL(hipMemcpy(x556, x126, 16384 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x559 = (float*)myGpuMalloc(512 * sizeof(float)); float* x127 = x44+883648; CUDA_CALL(hipMemcpy(x559, x127, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x562 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x128 = x44+6059456; CUDA_CALL(hipMemcpy(x562, x128, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x565 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x129 = x44+6336; CUDA_CALL(hipMemcpy(x565, x129, 36864 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x568 = (float*)myGpuMalloc(256 * sizeof(float)); float* x130 = x44+148928; CUDA_CALL(hipMemcpy(x568, x130, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x571 = (float*)myGpuMalloc(256 * sizeof(float)); float* x131 = x44+5467584; CUDA_CALL(hipMemcpy(x571, x131, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x574 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x132 = x44+8563136; CUDA_CALL(hipMemcpy(x574, x132, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x577 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x133 = x44+19076544; CUDA_CALL(hipMemcpy(x577, x133, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x580 = (float*)myGpuMalloc(128 * sizeof(float)); float* x134 = x44+816192; CUDA_CALL(hipMemcpy(x580, x134, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x583 = (float*)myGpuMalloc(256 * sizeof(float)); float* x135 = x44+3818176; CUDA_CALL(hipMemcpy(x583, x135, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x586 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x136 = x44+8299968; CUDA_CALL(hipMemcpy(x586, x136, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x589 = (float*)myGpuMalloc(256 * sizeof(float)); float* x137 = x44+5468352; CUDA_CALL(hipMemcpy(x589, x137, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x592 = (float*)myGpuMalloc(256 * sizeof(float)); float* x138 = x44+2170048; CUDA_CALL(hipMemcpy(x592, x138, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x595 = (float*)myGpuMalloc(128 * sizeof(float)); float* x139 = x44+668352; CUDA_CALL(hipMemcpy(x595, x139, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x598 = (float*)myGpuMalloc(512 * sizeof(float)); float* x140 = x44+468928; CUDA_CALL(hipMemcpy(x598, x140, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x601 = (float*)myGpuMalloc(64 * sizeof(float)); float* x141 = x44+94848; CUDA_CALL(hipMemcpy(x601, x141, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x604 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x142 = x44+23545280; CUDA_CALL(hipMemcpy(x604, x142, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x607 = (float*)myGpuMalloc(256 * sizeof(float)); float* x143 = x44+7179456; CUDA_CALL(hipMemcpy(x607, x143, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x610 = (float*)myGpuMalloc(64 * sizeof(float)); float* x144 = x44+43328; CUDA_CALL(hipMemcpy(x610, x144, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x613 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x145 = x44+401856; CUDA_CALL(hipMemcpy(x613, x145, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x616 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x146 = x44+14609856; CUDA_CALL(hipMemcpy(x616, x146, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x619 = (float*)myGpuMalloc(256 * sizeof(float)); float* x147 = x44+2169280; CUDA_CALL(hipMemcpy(x619, x147, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x622 = (float*)myGpuMalloc(256 * sizeof(float)); float* x148 = x44+7178944; CUDA_CALL(hipMemcpy(x622, x148, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x625 = (float*)myGpuMalloc(64 * sizeof(float)); float* x149 = x44+1920; CUDA_CALL(hipMemcpy(x625, x149, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x628 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x150 = x44+816576; CUDA_CALL(hipMemcpy(x628, x150, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x631 = (float*)myGpuMalloc(128 * sizeof(float)); float* x151 = x44+949952; CUDA_CALL(hipMemcpy(x631, x151, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x634 = (float*)myGpuMalloc(512 * sizeof(float)); float* x152 = x44+11452864; CUDA_CALL(hipMemcpy(x634, x152, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x637 = (float*)myGpuMalloc(64 * sizeof(float)); float* x153 = x44+6208; CUDA_CALL(hipMemcpy(x637, x153, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x640 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x154 = x44+12506560; CUDA_CALL(hipMemcpy(x640, x154, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x643 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x155 = x44+4939200; CUDA_CALL(hipMemcpy(x643, x155, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x646 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x156 = x44+2433472; CUDA_CALL(hipMemcpy(x646, x156, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x649 = (float*)myGpuMalloc(64 * sizeof(float)); float* x157 = x44+203136; CUDA_CALL(hipMemcpy(x649, x157, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x652 = (float*)myGpuMalloc(512 * sizeof(float)); float* x158 = x44+601024; CUDA_CALL(hipMemcpy(x652, x158, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x655 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x159 = x44+7442880; CUDA_CALL(hipMemcpy(x655, x159, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x658 = (float*)myGpuMalloc(512 * sizeof(float)); float* x160 = x44+9092032; CUDA_CALL(hipMemcpy(x658, x160, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x661 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x161 = x44+8564160; CUDA_CALL(hipMemcpy(x661, x161, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x664 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x162 = x44+23551424; CUDA_CALL(hipMemcpy(x664, x162, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x667 = (float*)myGpuMalloc(256 * sizeof(float)); float* x163 = x44+4938688; CUDA_CALL(hipMemcpy(x667, x163, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x670 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x164 = x44+14613952; CUDA_CALL(hipMemcpy(x670, x164, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x673 = (float*)myGpuMalloc(256 * sizeof(float)); float* x165 = x44+60096; CUDA_CALL(hipMemcpy(x673, x165, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x676 = (float*)myGpuMalloc(128 * sizeof(float)); float* x166 = x44+1097664; CUDA_CALL(hipMemcpy(x676, x166, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x679 = (float*)myGpuMalloc(128 * sizeof(float)); float* x167 = x44+401600; CUDA_CALL(hipMemcpy(x679, x167, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x682 = (float*)myGpuMalloc(256 * sizeof(float)); float* x168 = x44+4347328; CUDA_CALL(hipMemcpy(x682, x168, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x685 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x169 = x44+132032; CUDA_CALL(hipMemcpy(x685, x169, 16384 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x688 = (float*)myGpuMalloc(256 * sizeof(float)); float* x170 = x44+1578944; CUDA_CALL(hipMemcpy(x688, x170, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x691 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x171 = x44+1165760; CUDA_CALL(hipMemcpy(x691, x171, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x694 = (float*)myGpuMalloc(256 * sizeof(float)); float* x172 = x44+220352; CUDA_CALL(hipMemcpy(x694, x172, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x697 = (float*)myGpuMalloc(128 * sizeof(float)); float* x173 = x44+253760; CUDA_CALL(hipMemcpy(x697, x173, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x700 = (float*)myGpuMalloc(64 * sizeof(float)); float* x174 = x44+203008; CUDA_CALL(hipMemcpy(x700, x174, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x703 = (float*)myGpuMalloc(256 * sizeof(float)); float* x175 = x44+6058688; CUDA_CALL(hipMemcpy(x703, x175, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x706 = (float*)myGpuMalloc(512 * sizeof(float)); float* x176 = x44+15665088; CUDA_CALL(hipMemcpy(x706, x176, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x709 = (float*)myGpuMalloc(512 * sizeof(float)); float* x177 = x44+18026944; CUDA_CALL(hipMemcpy(x709, x177, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x712 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x178 = x44+8566208; CUDA_CALL(hipMemcpy(x712, x178, 524288 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x715 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x179 = x44+5203392; CUDA_CALL(hipMemcpy(x715, x179, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x718 = (float*)myGpuMalloc(256 * sizeof(float)); float* x180 = x44+8298944; CUDA_CALL(hipMemcpy(x718, x180, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x721 = (float*)myGpuMalloc(64 * sizeof(float)); float* x181 = x44+94656; CUDA_CALL(hipMemcpy(x721, x181, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x724 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x182 = x44+4084160; CUDA_CALL(hipMemcpy(x724, x182, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x727 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x183 = x44+19078592; CUDA_CALL(hipMemcpy(x727, x183, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x730 = (float*)myGpuMalloc(512 * sizeof(float)); float* x184 = x44+467392; CUDA_CALL(hipMemcpy(x730, x184, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x733 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x185 = x44+6322624; CUDA_CALL(hipMemcpy(x733, x185, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x736 = (float*)myGpuMalloc(512 * sizeof(float)); float* x186 = x44+883136; CUDA_CALL(hipMemcpy(x736, x186, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x739 = (float*)myGpuMalloc(128 * sizeof(float)); float* x187 = x44+1379648; CUDA_CALL(hipMemcpy(x739, x187, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x742 = (float*)myGpuMalloc(512 * sizeof(float)); float* x188 = x44+468416; CUDA_CALL(hipMemcpy(x742, x188, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x745 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x189 = x44+149440; CUDA_CALL(hipMemcpy(x745, x189, 16384 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x748 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x190 = x44+7445952; CUDA_CALL(hipMemcpy(x748, x190, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x751 = (float*)myGpuMalloc(1728 * sizeof(float)); float* x191 = x44+0; CUDA_CALL(hipMemcpy(x751, x191, 1728 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x754 = (float*)myGpuMalloc(64 * sizeof(float)); float* x192 = x44+131840; CUDA_CALL(hipMemcpy(x754, x192, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x757 = (float*)myGpuMalloc(512 * sizeof(float)); float* x193 = x44+15665600; CUDA_CALL(hipMemcpy(x757, x193, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x760 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x194 = x44+15666624; CUDA_CALL(hipMemcpy(x760, x194, 2359296 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x763 = (float*)myGpuMalloc(512 * sizeof(float)); float* x195 = x44+1445312; CUDA_CALL(hipMemcpy(x763, x195, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x766 = (float*)myGpuMalloc(256 * sizeof(float)); float* x196 = x44+3227840; CUDA_CALL(hipMemcpy(x766, x196, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x769 = (float*)myGpuMalloc(64 * sizeof(float)); float* x197 = x44+43392; CUDA_CALL(hipMemcpy(x769, x197, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x772 = (float*)myGpuMalloc(512 * sizeof(float)); float* x198 = x44+11452352; CUDA_CALL(hipMemcpy(x772, x198, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x775 = (float*)myGpuMalloc(512 * sizeof(float)); float* x199 = x44+18025920; CUDA_CALL(hipMemcpy(x775, x199, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x778 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x200 = x44+6324672; CUDA_CALL(hipMemcpy(x778, x200, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x781 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x201 = x44+60864; CUDA_CALL(hipMemcpy(x781, x201, 16384 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x784 = (float*)myGpuMalloc(256 * sizeof(float)); float* x202 = x44+5468096; CUDA_CALL(hipMemcpy(x784, x202, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x787 = (float*)myGpuMalloc(64 * sizeof(float)); float* x203 = x44+43200; CUDA_CALL(hipMemcpy(x787, x203, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x790 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x204 = x44+1231808; CUDA_CALL(hipMemcpy(x790, x204, 147456 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x793 = (float*)myGpuMalloc(256 * sizeof(float)); float* x205 = x44+149184; CUDA_CALL(hipMemcpy(x793, x205, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x796 = (float*)myGpuMalloc(512 * sizeof(float)); float* x206 = x44+1163712; CUDA_CALL(hipMemcpy(x796, x206, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x799 = (float*)myGpuMalloc(256 * sizeof(float)); float* x207 = x44+7178688; CUDA_CALL(hipMemcpy(x799, x207, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x802 = (float*)myGpuMalloc(512 * sizeof(float)); float* x208 = x44+22495168; CUDA_CALL(hipMemcpy(x802, x208, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x805 = (float*)myGpuMalloc(128 * sizeof(float)); float* x209 = x44+949824; CUDA_CALL(hipMemcpy(x805, x209, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x808 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x210 = x44+78272; CUDA_CALL(hipMemcpy(x808, x210, 16384 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x811 = (float*)myGpuMalloc(128 * sizeof(float)); float* x211 = x44+253504; CUDA_CALL(hipMemcpy(x811, x211, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x814 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x212 = x44+14607808; CUDA_CALL(hipMemcpy(x814, x212, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x817 = (float*)myGpuMalloc(256 * sizeof(float)); float* x213 = x44+4348096; CUDA_CALL(hipMemcpy(x817, x213, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x820 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x214 = x44+1579456; CUDA_CALL(hipMemcpy(x820, x214, 589824 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x823 = (float*)myGpuMalloc(256 * sizeof(float)); float* x215 = x44+7708864; CUDA_CALL(hipMemcpy(x823, x215, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x826 = (float*)myGpuMalloc(128 * sizeof(float)); float* x216 = x44+668480; CUDA_CALL(hipMemcpy(x826, x216, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x829 = (float*)myGpuMalloc(256 * sizeof(float)); float* x217 = x44+4347840; CUDA_CALL(hipMemcpy(x829, x217, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x832 = (float*)myGpuMalloc(64 * sizeof(float)); float* x218 = x44+203072; CUDA_CALL(hipMemcpy(x832, x218, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x835 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x219 = x44+1447360; CUDA_CALL(hipMemcpy(x835, x219, 131072 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x838 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x220 = x44+23547328; CUDA_CALL(hipMemcpy(x838, x220, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x841 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x221 = x44+4083136; CUDA_CALL(hipMemcpy(x841, x221, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x844 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x222 = x44+8565184; CUDA_CALL(hipMemcpy(x844, x222, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x847 = (float*)myGpuMalloc(256 * sizeof(float)); float* x223 = x44+220096; CUDA_CALL(hipMemcpy(x847, x223, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x850 = (float*)myGpuMalloc(256 * sizeof(float)); float* x224 = x44+6588096; CUDA_CALL(hipMemcpy(x850, x224, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x853 = (float*)myGpuMalloc(256 * sizeof(float)); float* x225 = x44+6058944; CUDA_CALL(hipMemcpy(x853, x225, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x856 = (float*)myGpuMalloc(64 * sizeof(float)); float* x226 = x44+166016; CUDA_CALL(hipMemcpy(x856, x226, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x859 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x227 = x44+5204416; CUDA_CALL(hipMemcpy(x859, x227, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x862 = (float*)myGpuMalloc(256 * sizeof(float)); float* x228 = x44+8299200; CUDA_CALL(hipMemcpy(x862, x228, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x865 = (float*)myGpuMalloc(128 * sizeof(float)); float* x229 = x44+401472; CUDA_CALL(hipMemcpy(x865, x229, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x868 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x230 = x44+950208; CUDA_CALL(hipMemcpy(x868, x230, 147456 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x871 = (float*)myGpuMalloc(256 * sizeof(float)); float* x231 = x44+4938432; CUDA_CALL(hipMemcpy(x871, x231, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x874 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x232 = x44+12508608; CUDA_CALL(hipMemcpy(x874, x232, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x877 = (float*)myGpuMalloc(512 * sizeof(float)); float* x233 = x44+22494656; CUDA_CALL(hipMemcpy(x877, x233, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x880 = (float*)myGpuMalloc(512 * sizeof(float)); float* x234 = x44+18027456; CUDA_CALL(hipMemcpy(x880, x234, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x883 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x235 = x44+884160; CUDA_CALL(hipMemcpy(x883, x235, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x886 = (float*)myGpuMalloc(256 * sizeof(float)); float* x236 = x44+4347584; CUDA_CALL(hipMemcpy(x886, x236, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x889 = (float*)myGpuMalloc(256 * sizeof(float)); float* x237 = x44+1579200; CUDA_CALL(hipMemcpy(x889, x237, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x892 = (float*)myGpuMalloc(256 * sizeof(float)); float* x238 = x44+59840; CUDA_CALL(hipMemcpy(x892, x238, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x895 = (float*)myGpuMalloc(256 * sizeof(float)); float* x239 = x44+3818432; CUDA_CALL(hipMemcpy(x895, x239, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x898 = (float*)myGpuMalloc(512 * sizeof(float)); float* x240 = x44+9090496; CUDA_CALL(hipMemcpy(x898, x240, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x901 = (float*)myGpuMalloc(512 * sizeof(float)); float* x241 = x44+22496192; CUDA_CALL(hipMemcpy(x901, x241, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x904 = (float*)myGpuMalloc(256 * sizeof(float)); float* x242 = x44+77504; CUDA_CALL(hipMemcpy(x904, x242, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x907 = (float*)myGpuMalloc(128 * sizeof(float)); float* x243 = x44+253632; CUDA_CALL(hipMemcpy(x907, x243, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x910 = (float*)myGpuMalloc(512 * sizeof(float)); float* x244 = x44+11451840; CUDA_CALL(hipMemcpy(x910, x244, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x913 = (float*)myGpuMalloc(64 * sizeof(float)); float* x245 = x44+1728; CUDA_CALL(hipMemcpy(x913, x245, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x916 = (float*)myGpuMalloc(512 * sizeof(float)); float* x246 = x44+600512; CUDA_CALL(hipMemcpy(x916, x246, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x919 = (float*)myGpuMalloc(64 * sizeof(float)); float* x247 = x44+131776; CUDA_CALL(hipMemcpy(x919, x247, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x922 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x248 = x44+7443904; CUDA_CALL(hipMemcpy(x922, x248, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x925 = (float*)myGpuMalloc(512 * sizeof(float)); float* x249 = x44+467904; CUDA_CALL(hipMemcpy(x925, x249, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x928 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x250 = x44+2963904; CUDA_CALL(hipMemcpy(x928, x250, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x931 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x251 = x44+11453888; CUDA_CALL(hipMemcpy(x931, x251, 1048576 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x934 = (float*)myGpuMalloc(512 * sizeof(float)); float* x252 = x44+20134336; CUDA_CALL(hipMemcpy(x934, x252, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x937 = (float*)myGpuMalloc(2097152 * sizeof(float)); float* x253 = x44+12510656; CUDA_CALL(hipMemcpy(x937, x253, 2097152 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x940 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x254 = x44+14616000; CUDA_CALL(hipMemcpy(x940, x254, 1048576 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x943 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x255 = x44+2434496; CUDA_CALL(hipMemcpy(x943, x255, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x946 = (float*)myGpuMalloc(128 * sizeof(float)); float* x256 = x44+1097920; CUDA_CALL(hipMemcpy(x946, x256, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x949 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x257 = x44+4085184; CUDA_CALL(hipMemcpy(x949, x257, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x952 = (float*)myGpuMalloc(256 * sizeof(float)); float* x258 = x44+3227328; CUDA_CALL(hipMemcpy(x952, x258, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x955 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x259 = x44+2961856; CUDA_CALL(hipMemcpy(x955, x259, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x958 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x260 = x44+7179712; CUDA_CALL(hipMemcpy(x958, x260, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x961 = (float*)myGpuMalloc(128 * sizeof(float)); float* x261 = x44+668096; CUDA_CALL(hipMemcpy(x961, x261, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x964 = (float*)myGpuMalloc(512 * sizeof(float)); float* x262 = x44+1165248; CUDA_CALL(hipMemcpy(x964, x262, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x967 = (float*)myGpuMalloc(512 * sizeof(float)); float* x263 = x44+9091008; CUDA_CALL(hipMemcpy(x967, x263, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x970 = (float*)myGpuMalloc(128 * sizeof(float)); float* x264 = x44+816448; CUDA_CALL(hipMemcpy(x970, x264, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x973 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x265 = x44+7709120; CUDA_CALL(hipMemcpy(x973, x265, 589824 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x976 = (float*)myGpuMalloc(20480 * sizeof(float)); float* x266 = x44+23553472; CUDA_CALL(hipMemcpy(x976, x266, 20480 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x979 = (float*)myGpuMalloc(256 * sizeof(float)); float* x267 = x44+4938176; CUDA_CALL(hipMemcpy(x979, x267, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x982 = (float*)myGpuMalloc(256 * sizeof(float)); float* x268 = x44+2169792; CUDA_CALL(hipMemcpy(x982, x268, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x985 = (float*)myGpuMalloc(256 * sizeof(float)); float* x269 = x44+6059200; CUDA_CALL(hipMemcpy(x985, x269, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x988 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x270 = x44+6323648; CUDA_CALL(hipMemcpy(x988, x270, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x991 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x271 = x44+4082112; CUDA_CALL(hipMemcpy(x991, x271, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x994 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x272 = x44+1984; CUDA_CALL(hipMemcpy(x994, x272, 4096 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x997 = (float*)myGpuMalloc(512 * sizeof(float)); float* x273 = x44+1446848; CUDA_CALL(hipMemcpy(x997, x273, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1000 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x274 = x44+668608; CUDA_CALL(hipMemcpy(x1000, x274, 147456 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1003 = (float*)myGpuMalloc(128 * sizeof(float)); float* x275 = x44+1231552; CUDA_CALL(hipMemcpy(x1003, x275, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1006 = (float*)myGpuMalloc(256 * sizeof(float)); float* x276 = x44+3818688; CUDA_CALL(hipMemcpy(x1006, x276, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1009 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x277 = x44+6321600; CUDA_CALL(hipMemcpy(x1009, x277, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1012 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x278 = x44+12502464; CUDA_CALL(hipMemcpy(x1012, x278, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1015 = (float*)myGpuMalloc(256 * sizeof(float)); float* x279 = x44+8299712; CUDA_CALL(hipMemcpy(x1015, x279, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1018 = (float*)myGpuMalloc(256 * sizeof(float)); float* x280 = x44+5467840; CUDA_CALL(hipMemcpy(x1018, x280, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1021 = (float*)myGpuMalloc(128 * sizeof(float)); float* x281 = x44+1231424; CUDA_CALL(hipMemcpy(x1021, x281, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1024 = (float*)myGpuMalloc(256 * sizeof(float)); float* x282 = x44+78016; CUDA_CALL(hipMemcpy(x1024, x282, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1027 = (float*)myGpuMalloc(64 * sizeof(float)); float* x283 = x44+131968; CUDA_CALL(hipMemcpy(x1027, x283, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1030 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x284 = x44+19082688; CUDA_CALL(hipMemcpy(x1030, x284, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1033 = (float*)myGpuMalloc(512 * sizeof(float)); float* x285 = x44+882624; CUDA_CALL(hipMemcpy(x1033, x285, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1036 = (float*)myGpuMalloc(256 * sizeof(float)); float* x286 = x44+219840; CUDA_CALL(hipMemcpy(x1036, x286, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1039 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x287 = x44+8562112; CUDA_CALL(hipMemcpy(x1039, x287, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x1042 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x288 = x44+5468608; CUDA_CALL(hipMemcpy(x1042, x288, 589824 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1045 = (float*)myGpuMalloc(256 * sizeof(float)); float* x289 = x44+7179200; CUDA_CALL(hipMemcpy(x1045, x289, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1048 = (float*)myGpuMalloc(64 * sizeof(float)); float* x290 = x44+1792; CUDA_CALL(hipMemcpy(x1048, x290, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1051 = (float*)myGpuMalloc(128 * sizeof(float)); float* x291 = x44+401344; CUDA_CALL(hipMemcpy(x1051, x291, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1054 = (float*)myGpuMalloc(256 * sizeof(float)); float* x292 = x44+7708352; CUDA_CALL(hipMemcpy(x1054, x292, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1057 = (float*)myGpuMalloc(256 * sizeof(float)); float* x293 = x44+6588352; CUDA_CALL(hipMemcpy(x1057, x293, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1060 = (float*)myGpuMalloc(512 * sizeof(float)); float* x294 = x44+20134848; CUDA_CALL(hipMemcpy(x1060, x294, 512 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1063 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x295 = x44+602560; CUDA_CALL(hipMemcpy(x1063, x295, 65536 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1066 = (float*)myGpuMalloc(64 * sizeof(float)); float* x296 = x44+165952; CUDA_CALL(hipMemcpy(x1066, x296, 64 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1069 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x297 = x44+469440; CUDA_CALL(hipMemcpy(x1069, x297, 131072 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1072 = (float*)myGpuMalloc(256 * sizeof(float)); float* x298 = x44+3227584; CUDA_CALL(hipMemcpy(x1072, x298, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1075 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x299 = x44+23549376; CUDA_CALL(hipMemcpy(x1075, x299, 2048 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1078 = (float*)myGpuMalloc(128 * sizeof(float)); float* x300 = x44+1231680; CUDA_CALL(hipMemcpy(x1078, x300, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1081 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x301 = x44+6588864; CUDA_CALL(hipMemcpy(x1081, x301, 589824 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1084 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x302 = x44+5201344; CUDA_CALL(hipMemcpy(x1084, x302, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1087 = (float*)myGpuMalloc(256 * sizeof(float)); float* x303 = x44+77760; CUDA_CALL(hipMemcpy(x1087, x303, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1090 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x304 = x44+19084736; CUDA_CALL(hipMemcpy(x1090, x304, 1048576 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1093 = (float*)myGpuMalloc(128 * sizeof(float)); float* x305 = x44+1098048; CUDA_CALL(hipMemcpy(x1093, x305, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
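// The handful of "Tensor 'toGPU' invocation" blocks remaining below complete the
// parameter upload. The plain myGpuMalloc calls that follow them (x1113 onward)
// allocate a second set of buffers whose sizes mirror the parameter tensors
// one-for-one; these are presumably the corresponding gradient/update buffers,
// and they start out zeroed if, as assumed above, myGpuMalloc hands out regions
// of the hipMemset-cleared device heap.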
float* x1096 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x306 = x44+2435520; CUDA_CALL(hipMemcpy(x1096, x306, 1024 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1099 = (float*)myGpuMalloc(128 * sizeof(float)); float* x307 = x44+1379520; CUDA_CALL(hipMemcpy(x1099, x307, 128 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1102 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x308 = x44+2170304; CUDA_CALL(hipMemcpy(x1102, x308, 262144 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1105 = (float*)myGpuMalloc(256 * sizeof(float)); float* x309 = x44+1578432; CUDA_CALL(hipMemcpy(x1105, x309, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1108 = (float*)myGpuMalloc(256 * sizeof(float)); float* x310 = x44+3817920; CUDA_CALL(hipMemcpy(x1108, x310, 256 * sizeof(float), hipMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1111 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x311 = x44+7444928; CUDA_CALL(hipMemcpy(x1111, x311, 1024 * sizeof(float), hipMemcpyHostToDevice)); float* x1113 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1114 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1115 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1116 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1117 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1118 = (float*)myGpuMalloc(32768 * sizeof(float)); float* x1119 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1120 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1121 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1122 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1123 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1124 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1125 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1126 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1127 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1128 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1129 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1130 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1131 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1132 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1133 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1134 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1135 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x1136 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1137 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1138 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1139 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1140 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1141 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x1142 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1143 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1144 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1145 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1146 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1147 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1148 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1149 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1150 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1151 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1152 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1153 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1154 = (float*)myGpuMalloc(1048576 * 
sizeof(float)); float* x1155 = (float*)myGpuMalloc(10 * sizeof(float)); float* x1156 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1157 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1158 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1159 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1160 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1161 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1162 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1163 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1164 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1165 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1166 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1167 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1168 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1169 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1170 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1171 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1172 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1173 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1174 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1175 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1176 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1177 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1178 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1179 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1180 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x1181 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1182 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x1183 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1184 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1185 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1186 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1187 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1188 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1189 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1190 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x1191 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1192 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1193 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1194 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1195 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1196 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1197 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x1198 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1199 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1200 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1201 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1202 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1203 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1204 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1205 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1206 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1207 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1208 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1209 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1210 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1211 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1212 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1213 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1214 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1215 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1216 = 
(float*)myGpuMalloc(256 * sizeof(float)); float* x1217 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1218 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1219 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1220 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1221 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1222 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1223 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1224 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1225 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1226 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1227 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1228 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1229 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1230 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1231 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1232 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1233 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1234 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1235 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1236 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1237 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1238 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1239 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1240 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1241 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1242 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1243 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1244 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1245 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1246 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x1247 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1248 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1249 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1250 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1251 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1252 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1253 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1254 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1255 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1256 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1257 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1258 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1259 = (float*)myGpuMalloc(1728 * sizeof(float)); float* x1260 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1261 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1262 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x1263 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1264 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1265 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1266 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1267 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1268 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1269 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1270 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1271 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1272 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1273 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1274 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1275 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1276 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1277 = (float*)myGpuMalloc(128 * sizeof(float)); 
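// Layout note (annotation, not generated output): the buffers earlier in this
// block that are paired with a hipMemcpy ("Tensor 'toGPU' invocation") hold
// model parameters copied from the host array x44 onto the device. The plain
// myGpuMalloc calls in this long run have no matching host copy and appear to
// reserve same-sized device buffers for the corresponding gradients used by
// the training loop further below.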
float* x1278 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1279 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1280 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1281 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1282 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1283 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1284 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1285 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1286 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1287 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x1288 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1289 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1290 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1291 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1292 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1293 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1294 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1295 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1296 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1297 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1298 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1299 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1300 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1301 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1302 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1303 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1304 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1305 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1306 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1307 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1308 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1309 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1310 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1311 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1312 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1313 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1314 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1315 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1316 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1317 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1318 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1319 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1320 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1321 = (float*)myGpuMalloc(2097152 * sizeof(float)); float* x1322 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1323 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1324 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1325 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1326 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1327 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1328 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1329 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1330 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1331 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1332 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1333 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1334 = (float*)myGpuMalloc(20480 * sizeof(float)); float* x1335 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1336 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1337 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1338 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1339 = 
(float*)myGpuMalloc(1024 * sizeof(float)); float* x1340 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x1341 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1342 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1343 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1344 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1345 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1346 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1347 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1348 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1349 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1350 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1351 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1352 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1353 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1354 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1355 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1356 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1357 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1358 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1359 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1360 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1361 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1362 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1363 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1364 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1365 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x1366 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1367 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1368 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1369 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1370 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1371 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1372 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1373 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1374 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1375 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1376 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1377 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1378 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1379 = (float*)myGpuMalloc(1024 * sizeof(float)); double* x1380 = (double*)myMalloc(4 * sizeof(double));; double* x1381 = (double*)myMalloc(4 * sizeof(double));; int64_t x1382 = (long)mallocAddr; int64_t x1383 = (long)gpuMallocAddr; // training loop starts here int32_t x1394 = x11 / 64; int32_t x1411 = 31 / 1; int32_t x1412 = x1411 + 1; int32_t x1416 = 4096 * x1412; int32_t x1417 = x1416 * x1412; int32_t x1413 = x1412 * x1412; int32_t x1414 = 64 * x1413; int32_t x1415 = 64 * x1414; int32_t x1443 = x1412 - 2; int32_t x1444 = x1443 / 2; int32_t x1445 = x1444 + 1; int32_t x1449 = 4096 * x1445; int32_t x1450 = x1449 * x1445; bool x1454 = x1445 >= 1; bool x1455; if (x1454) { x1455 = x1454; } else { x1455 = false; } int32_t x1460 = x1444 / 1; int32_t x1461 = x1460 + 1; int32_t x1465 = 4096 * x1461; int32_t x1466 = x1465 * x1461; int32_t x1462 = x1461 * x1461; int32_t x1463 = 64 * x1462; int32_t x1464 = 64 * x1463; int32_t x1488 = x1461 + 2; bool x1489 = x1488 >= 3; bool x1490; if (x1489) { x1490 = x1489; } else { x1490 = false; } int32_t x1495 = x1488 - 3; int32_t x1496 = x1495 / 1; int32_t x1497 = x1496 + 1; int32_t x1501 = 4096 * x1497; int32_t x1502 = x1501 * x1497; int32_t x1498 = x1497 * x1497; int32_t x1499 = 64 * x1498; int32_t x1500 
= 64 * x1499; bool x1524 = x1497 >= 1; bool x1525; if (x1524) { x1525 = x1524; } else { x1525 = false; } int32_t x1530 = x1496 / 1; int32_t x1531 = x1530 + 1; int32_t x1535 = 16384 * x1531; int32_t x1536 = x1535 * x1531; int32_t x1532 = x1531 * x1531; int32_t x1533 = 256 * x1532; int32_t x1534 = 64 * x1533; int32_t x1558 = 16384 * x1461; int32_t x1559 = x1558 * x1461; int32_t x1556 = 256 * x1462; int32_t x1557 = 64 * x1556; bool x1576 = x1461 == 1; bool x1577 = x1461 == x1531; bool x1578 = x1576 || x1577; bool x1579; if (x1578) { x1579 = x1578; } else { x1579 = false; } bool x1594 = x1531 >= 1; bool x1595; if (x1594) { x1595 = x1594; } else { x1595 = false; } int32_t x1600 = x1530 / 1; int32_t x1601 = x1600 + 1; int32_t x1605 = 4096 * x1601; int32_t x1606 = x1605 * x1601; int32_t x1602 = x1601 * x1601; int32_t x1603 = 64 * x1602; int32_t x1604 = 64 * x1603; int32_t x1628 = x1601 + 2; bool x1629 = x1628 >= 3; bool x1630; if (x1629) { x1630 = x1629; } else { x1630 = false; } int32_t x1635 = x1628 - 3; int32_t x1636 = x1635 / 1; int32_t x1637 = x1636 + 1; int32_t x1641 = 4096 * x1637; int32_t x1642 = x1641 * x1637; int32_t x1638 = x1637 * x1637; int32_t x1639 = 64 * x1638; int32_t x1640 = 64 * x1639; bool x1664 = x1637 >= 1; bool x1665; if (x1664) { x1665 = x1664; } else { x1665 = false; } int32_t x1670 = x1636 / 1; int32_t x1671 = x1670 + 1; int32_t x1675 = 16384 * x1671; int32_t x1676 = x1675 * x1671; int32_t x1672 = x1671 * x1671; int32_t x1673 = 256 * x1672; int32_t x1674 = 64 * x1673; bool x1693 = x1531 == 1; bool x1694 = x1531 == x1671; bool x1695 = x1693 || x1694; bool x1696; if (x1695) { x1696 = x1695; } else { x1696 = false; } bool x1711 = x1671 >= 1; bool x1712; if (x1711) { x1712 = x1711; } else { x1712 = false; } int32_t x1717 = x1670 / 1; int32_t x1718 = x1717 + 1; int32_t x1722 = 4096 * x1718; int32_t x1723 = x1722 * x1718; int32_t x1719 = x1718 * x1718; int32_t x1720 = 64 * x1719; int32_t x1721 = 64 * x1720; int32_t x1745 = x1718 + 2; bool x1746 = x1745 >= 3; bool x1747; if (x1746) { x1747 = x1746; } else { x1747 = false; } int32_t x1752 = x1745 - 3; int32_t x1753 = x1752 / 1; int32_t x1754 = x1753 + 1; int32_t x1758 = 4096 * x1754; int32_t x1759 = x1758 * x1754; int32_t x1755 = x1754 * x1754; int32_t x1756 = 64 * x1755; int32_t x1757 = 64 * x1756; bool x1781 = x1754 >= 1; bool x1782; if (x1781) { x1782 = x1781; } else { x1782 = false; } int32_t x1787 = x1753 / 1; int32_t x1788 = x1787 + 1; int32_t x1792 = 16384 * x1788; int32_t x1793 = x1792 * x1788; int32_t x1789 = x1788 * x1788; int32_t x1790 = 256 * x1789; int32_t x1791 = 64 * x1790; bool x1810 = x1671 == 1; bool x1811 = x1671 == x1788; bool x1812 = x1810 || x1811; bool x1813; if (x1812) { x1813 = x1812; } else { x1813 = false; } bool x1828 = x1788 >= 1; bool x1829; if (x1828) { x1829 = x1828; } else { x1829 = false; } int32_t x1834 = x1787 / 1; int32_t x1835 = x1834 + 1; int32_t x1839 = 8192 * x1835; int32_t x1840 = x1839 * x1835; int32_t x1836 = x1835 * x1835; int32_t x1837 = 128 * x1836; int32_t x1838 = 64 * x1837; int32_t x1862 = x1835 + 2; bool x1863 = x1862 >= 3; bool x1864; if (x1863) { x1864 = x1863; } else { x1864 = false; } int32_t x1869 = x1862 - 3; int32_t x1870 = x1869 / 2; int32_t x1871 = x1870 + 1; int32_t x1875 = 8192 * x1871; int32_t x1876 = x1875 * x1871; int32_t x1872 = x1871 * x1871; int32_t x1873 = 128 * x1872; int32_t x1874 = 64 * x1873; bool x1898 = x1871 >= 1; bool x1899; if (x1898) { x1899 = x1898; } else { x1899 = false; } int32_t x1904 = x1870 / 1; int32_t x1905 = x1904 + 1; int32_t x1909 = 32768 
* x1905; int32_t x1910 = x1909 * x1905; int32_t x1906 = x1905 * x1905; int32_t x1907 = 512 * x1906; int32_t x1908 = 64 * x1907; int32_t x1930 = x1787 / 2; int32_t x1931 = x1930 + 1; int32_t x1935 = 32768 * x1931; int32_t x1936 = x1935 * x1931; int32_t x1932 = x1931 * x1931; int32_t x1933 = 512 * x1932; int32_t x1934 = 64 * x1933; bool x1953 = x1931 == 1; bool x1954 = x1931 == x1905; bool x1955 = x1953 || x1954; bool x1956; if (x1955) { x1956 = x1955; } else { x1956 = false; } bool x1971 = x1905 >= 1; bool x1972; if (x1971) { x1972 = x1971; } else { x1972 = false; } int32_t x1977 = x1904 / 1; int32_t x1978 = x1977 + 1; int32_t x1982 = 8192 * x1978; int32_t x1983 = x1982 * x1978; int32_t x1979 = x1978 * x1978; int32_t x1980 = 128 * x1979; int32_t x1981 = 64 * x1980; int32_t x2005 = x1978 + 2; bool x2006 = x2005 >= 3; bool x2007; if (x2006) { x2007 = x2006; } else { x2007 = false; } int32_t x2012 = x2005 - 3; int32_t x2013 = x2012 / 1; int32_t x2014 = x2013 + 1; int32_t x2018 = 8192 * x2014; int32_t x2019 = x2018 * x2014; int32_t x2015 = x2014 * x2014; int32_t x2016 = 128 * x2015; int32_t x2017 = 64 * x2016; bool x2041 = x2014 >= 1; bool x2042; if (x2041) { x2042 = x2041; } else { x2042 = false; } int32_t x2047 = x2013 / 1; int32_t x2048 = x2047 + 1; int32_t x2052 = 32768 * x2048; int32_t x2053 = x2052 * x2048; int32_t x2049 = x2048 * x2048; int32_t x2050 = 512 * x2049; int32_t x2051 = 64 * x2050; bool x2070 = x1905 == 1; bool x2071 = x1905 == x2048; bool x2072 = x2070 || x2071; bool x2073; if (x2072) { x2073 = x2072; } else { x2073 = false; } bool x2088 = x2048 >= 1; bool x2089; if (x2088) { x2089 = x2088; } else { x2089 = false; } int32_t x2094 = x2047 / 1; int32_t x2095 = x2094 + 1; int32_t x2099 = 8192 * x2095; int32_t x2100 = x2099 * x2095; int32_t x2096 = x2095 * x2095; int32_t x2097 = 128 * x2096; int32_t x2098 = 64 * x2097; int32_t x2122 = x2095 + 2; bool x2123 = x2122 >= 3; bool x2124; if (x2123) { x2124 = x2123; } else { x2124 = false; } int32_t x2129 = x2122 - 3; int32_t x2130 = x2129 / 1; int32_t x2131 = x2130 + 1; int32_t x2135 = 8192 * x2131; int32_t x2136 = x2135 * x2131; int32_t x2132 = x2131 * x2131; int32_t x2133 = 128 * x2132; int32_t x2134 = 64 * x2133; bool x2158 = x2131 >= 1; bool x2159; if (x2158) { x2159 = x2158; } else { x2159 = false; } int32_t x2164 = x2130 / 1; int32_t x2165 = x2164 + 1; int32_t x2169 = 32768 * x2165; int32_t x2170 = x2169 * x2165; int32_t x2166 = x2165 * x2165; int32_t x2167 = 512 * x2166; int32_t x2168 = 64 * x2167; bool x2187 = x2048 == 1; bool x2188 = x2048 == x2165; bool x2189 = x2187 || x2188; bool x2190; if (x2189) { x2190 = x2189; } else { x2190 = false; } bool x2205 = x2165 >= 1; bool x2206; if (x2205) { x2206 = x2205; } else { x2206 = false; } int32_t x2211 = x2164 / 1; int32_t x2212 = x2211 + 1; int32_t x2216 = 8192 * x2212; int32_t x2217 = x2216 * x2212; int32_t x2213 = x2212 * x2212; int32_t x2214 = 128 * x2213; int32_t x2215 = 64 * x2214; int32_t x2239 = x2212 + 2; bool x2240 = x2239 >= 3; bool x2241; if (x2240) { x2241 = x2240; } else { x2241 = false; } int32_t x2246 = x2239 - 3; int32_t x2247 = x2246 / 1; int32_t x2248 = x2247 + 1; int32_t x2252 = 8192 * x2248; int32_t x2253 = x2252 * x2248; int32_t x2249 = x2248 * x2248; int32_t x2250 = 128 * x2249; int32_t x2251 = 64 * x2250; bool x2275 = x2248 >= 1; bool x2276; if (x2275) { x2276 = x2275; } else { x2276 = false; } int32_t x2281 = x2247 / 1; int32_t x2282 = x2281 + 1; int32_t x2286 = 32768 * x2282; int32_t x2287 = x2286 * x2282; int32_t x2283 = x2282 * x2282; int32_t x2284 = 512 * 
x2283; int32_t x2285 = 64 * x2284; bool x2304 = x2165 == 1; bool x2305 = x2165 == x2282; bool x2306 = x2304 || x2305; bool x2307; if (x2306) { x2307 = x2306; } else { x2307 = false; } bool x2322 = x2282 >= 1; bool x2323; if (x2322) { x2323 = x2322; } else { x2323 = false; } int32_t x2328 = x2281 / 1; int32_t x2329 = x2328 + 1; int32_t x2333 = 16384 * x2329; int32_t x2334 = x2333 * x2329; int32_t x2330 = x2329 * x2329; int32_t x2331 = 256 * x2330; int32_t x2332 = 64 * x2331; int32_t x2356 = x2329 + 2; bool x2357 = x2356 >= 3; bool x2358; if (x2357) { x2358 = x2357; } else { x2358 = false; } int32_t x2363 = x2356 - 3; int32_t x2364 = x2363 / 2; int32_t x2365 = x2364 + 1; int32_t x2369 = 16384 * x2365; int32_t x2370 = x2369 * x2365; int32_t x2366 = x2365 * x2365; int32_t x2367 = 256 * x2366; int32_t x2368 = 64 * x2367; bool x2392 = x2365 >= 1; bool x2393; if (x2392) { x2393 = x2392; } else { x2393 = false; } int32_t x2398 = x2364 / 1; int32_t x2399 = x2398 + 1; int32_t x2403 = 65536 * x2399; int32_t x2404 = x2403 * x2399; int32_t x2400 = x2399 * x2399; int32_t x2401 = 1024 * x2400; int32_t x2402 = 64 * x2401; int32_t x2424 = x2281 / 2; int32_t x2425 = x2424 + 1; int32_t x2429 = 65536 * x2425; int32_t x2430 = x2429 * x2425; int32_t x2426 = x2425 * x2425; int32_t x2427 = 1024 * x2426; int32_t x2428 = 64 * x2427; bool x2447 = x2425 == 1; bool x2448 = x2425 == x2399; bool x2449 = x2447 || x2448; bool x2450; if (x2449) { x2450 = x2449; } else { x2450 = false; } bool x2465 = x2399 >= 1; bool x2466; if (x2465) { x2466 = x2465; } else { x2466 = false; } int32_t x2471 = x2398 / 1; int32_t x2472 = x2471 + 1; int32_t x2476 = 16384 * x2472; int32_t x2477 = x2476 * x2472; int32_t x2473 = x2472 * x2472; int32_t x2474 = 256 * x2473; int32_t x2475 = 64 * x2474; int32_t x2499 = x2472 + 2; bool x2500 = x2499 >= 3; bool x2501; if (x2500) { x2501 = x2500; } else { x2501 = false; } int32_t x2506 = x2499 - 3; int32_t x2507 = x2506 / 1; int32_t x2508 = x2507 + 1; int32_t x2512 = 16384 * x2508; int32_t x2513 = x2512 * x2508; int32_t x2509 = x2508 * x2508; int32_t x2510 = 256 * x2509; int32_t x2511 = 64 * x2510; bool x2535 = x2508 >= 1; bool x2536; if (x2535) { x2536 = x2535; } else { x2536 = false; } int32_t x2541 = x2507 / 1; int32_t x2542 = x2541 + 1; int32_t x2546 = 65536 * x2542; int32_t x2547 = x2546 * x2542; int32_t x2543 = x2542 * x2542; int32_t x2544 = 1024 * x2543; int32_t x2545 = 64 * x2544; bool x2564 = x2399 == 1; bool x2565 = x2399 == x2542; bool x2566 = x2564 || x2565; bool x2567; if (x2566) { x2567 = x2566; } else { x2567 = false; } bool x2582 = x2542 >= 1; bool x2583; if (x2582) { x2583 = x2582; } else { x2583 = false; } int32_t x2588 = x2541 / 1; int32_t x2589 = x2588 + 1; int32_t x2593 = 16384 * x2589; int32_t x2594 = x2593 * x2589; int32_t x2590 = x2589 * x2589; int32_t x2591 = 256 * x2590; int32_t x2592 = 64 * x2591; int32_t x2616 = x2589 + 2; bool x2617 = x2616 >= 3; bool x2618; if (x2617) { x2618 = x2617; } else { x2618 = false; } int32_t x2623 = x2616 - 3; int32_t x2624 = x2623 / 1; int32_t x2625 = x2624 + 1; int32_t x2629 = 16384 * x2625; int32_t x2630 = x2629 * x2625; int32_t x2626 = x2625 * x2625; int32_t x2627 = 256 * x2626; int32_t x2628 = 64 * x2627; bool x2652 = x2625 >= 1; bool x2653; if (x2652) { x2653 = x2652; } else { x2653 = false; } int32_t x2658 = x2624 / 1; int32_t x2659 = x2658 + 1; int32_t x2663 = 65536 * x2659; int32_t x2664 = x2663 * x2659; int32_t x2660 = x2659 * x2659; int32_t x2661 = 1024 * x2660; int32_t x2662 = 64 * x2661; bool x2681 = x2542 == 1; bool x2682 = x2542 == 
x2659; bool x2683 = x2681 || x2682; bool x2684; if (x2683) { x2684 = x2683; } else { x2684 = false; } bool x2699 = x2659 >= 1; bool x2700; if (x2699) { x2700 = x2699; } else { x2700 = false; } int32_t x2705 = x2658 / 1; int32_t x2706 = x2705 + 1; int32_t x2710 = 16384 * x2706; int32_t x2711 = x2710 * x2706; int32_t x2707 = x2706 * x2706; int32_t x2708 = 256 * x2707; int32_t x2709 = 64 * x2708; int32_t x2733 = x2706 + 2; bool x2734 = x2733 >= 3; bool x2735; if (x2734) { x2735 = x2734; } else { x2735 = false; } int32_t x2740 = x2733 - 3; int32_t x2741 = x2740 / 1; int32_t x2742 = x2741 + 1; int32_t x2746 = 16384 * x2742; int32_t x2747 = x2746 * x2742; int32_t x2743 = x2742 * x2742; int32_t x2744 = 256 * x2743; int32_t x2745 = 64 * x2744; bool x2769 = x2742 >= 1; bool x2770; if (x2769) { x2770 = x2769; } else { x2770 = false; } int32_t x2775 = x2741 / 1; int32_t x2776 = x2775 + 1; int32_t x2780 = 65536 * x2776; int32_t x2781 = x2780 * x2776; int32_t x2777 = x2776 * x2776; int32_t x2778 = 1024 * x2777; int32_t x2779 = 64 * x2778; bool x2798 = x2659 == 1; bool x2799 = x2659 == x2776; bool x2800 = x2798 || x2799; bool x2801; if (x2800) { x2801 = x2800; } else { x2801 = false; } bool x2816 = x2776 >= 1; bool x2817; if (x2816) { x2817 = x2816; } else { x2817 = false; } int32_t x2822 = x2775 / 1; int32_t x2823 = x2822 + 1; int32_t x2827 = 16384 * x2823; int32_t x2828 = x2827 * x2823; int32_t x2824 = x2823 * x2823; int32_t x2825 = 256 * x2824; int32_t x2826 = 64 * x2825; int32_t x2850 = x2823 + 2; bool x2851 = x2850 >= 3; bool x2852; if (x2851) { x2852 = x2851; } else { x2852 = false; } int32_t x2857 = x2850 - 3; int32_t x2858 = x2857 / 1; int32_t x2859 = x2858 + 1; int32_t x2863 = 16384 * x2859; int32_t x2864 = x2863 * x2859; int32_t x2860 = x2859 * x2859; int32_t x2861 = 256 * x2860; int32_t x2862 = 64 * x2861; bool x2886 = x2859 >= 1; bool x2887; if (x2886) { x2887 = x2886; } else { x2887 = false; } int32_t x2892 = x2858 / 1; int32_t x2893 = x2892 + 1; int32_t x2897 = 65536 * x2893; int32_t x2898 = x2897 * x2893; int32_t x2894 = x2893 * x2893; int32_t x2895 = 1024 * x2894; int32_t x2896 = 64 * x2895; bool x2915 = x2776 == 1; bool x2916 = x2776 == x2893; bool x2917 = x2915 || x2916; bool x2918; if (x2917) { x2918 = x2917; } else { x2918 = false; } bool x2933 = x2893 >= 1; bool x2934; if (x2933) { x2934 = x2933; } else { x2934 = false; } int32_t x2939 = x2892 / 1; int32_t x2940 = x2939 + 1; int32_t x2944 = 16384 * x2940; int32_t x2945 = x2944 * x2940; int32_t x2941 = x2940 * x2940; int32_t x2942 = 256 * x2941; int32_t x2943 = 64 * x2942; int32_t x2967 = x2940 + 2; bool x2968 = x2967 >= 3; bool x2969; if (x2968) { x2969 = x2968; } else { x2969 = false; } int32_t x2974 = x2967 - 3; int32_t x2975 = x2974 / 1; int32_t x2976 = x2975 + 1; int32_t x2980 = 16384 * x2976; int32_t x2981 = x2980 * x2976; int32_t x2977 = x2976 * x2976; int32_t x2978 = 256 * x2977; int32_t x2979 = 64 * x2978; bool x3003 = x2976 >= 1; bool x3004; if (x3003) { x3004 = x3003; } else { x3004 = false; } int32_t x3009 = x2975 / 1; int32_t x3010 = x3009 + 1; int32_t x3014 = 65536 * x3010; int32_t x3015 = x3014 * x3010; int32_t x3011 = x3010 * x3010; int32_t x3012 = 1024 * x3011; int32_t x3013 = 64 * x3012; bool x3032 = x2893 == 1; bool x3033 = x2893 == x3010; bool x3034 = x3032 || x3033; bool x3035; if (x3034) { x3035 = x3034; } else { x3035 = false; } bool x3050 = x3010 >= 1; bool x3051; if (x3050) { x3051 = x3050; } else { x3051 = false; } int32_t x3056 = x3009 / 1; int32_t x3057 = x3056 + 1; int32_t x3061 = 32768 * x3057; int32_t 
x3062 = x3061 * x3057; int32_t x3058 = x3057 * x3057; int32_t x3059 = 512 * x3058; int32_t x3060 = 64 * x3059; int32_t x3084 = x3057 + 2; bool x3085 = x3084 >= 3; bool x3086; if (x3085) { x3086 = x3085; } else { x3086 = false; } int32_t x3091 = x3084 - 3; int32_t x3092 = x3091 / 2; int32_t x3093 = x3092 + 1; int32_t x3097 = 32768 * x3093; int32_t x3098 = x3097 * x3093; int32_t x3094 = x3093 * x3093; int32_t x3095 = 512 * x3094; int32_t x3096 = 64 * x3095; bool x3120 = x3093 >= 1; bool x3121; if (x3120) { x3121 = x3120; } else { x3121 = false; } int32_t x3126 = x3092 / 1; int32_t x3127 = x3126 + 1; int32_t x3131 = 131072 * x3127; int32_t x3132 = x3131 * x3127; int32_t x3128 = x3127 * x3127; int32_t x3129 = 2048 * x3128; int32_t x3130 = 64 * x3129; int32_t x3152 = x3009 / 2; int32_t x3153 = x3152 + 1; int32_t x3157 = 131072 * x3153; int32_t x3158 = x3157 * x3153; int32_t x3154 = x3153 * x3153; int32_t x3155 = 2048 * x3154; int32_t x3156 = 64 * x3155; bool x3175 = x3153 == 1; bool x3176 = x3153 == x3127; bool x3177 = x3175 || x3176; bool x3178; if (x3177) { x3178 = x3177; } else { x3178 = false; } bool x3193 = x3127 >= 1; bool x3194; if (x3193) { x3194 = x3193; } else { x3194 = false; } int32_t x3199 = x3126 / 1; int32_t x3200 = x3199 + 1; int32_t x3204 = 32768 * x3200; int32_t x3205 = x3204 * x3200; int32_t x3201 = x3200 * x3200; int32_t x3202 = 512 * x3201; int32_t x3203 = 64 * x3202; int32_t x3227 = x3200 + 2; bool x3228 = x3227 >= 3; bool x3229; if (x3228) { x3229 = x3228; } else { x3229 = false; } int32_t x3234 = x3227 - 3; int32_t x3235 = x3234 / 1; int32_t x3236 = x3235 + 1; int32_t x3240 = 32768 * x3236; int32_t x3241 = x3240 * x3236; int32_t x3237 = x3236 * x3236; int32_t x3238 = 512 * x3237; int32_t x3239 = 64 * x3238; bool x3263 = x3236 >= 1; bool x3264; if (x3263) { x3264 = x3263; } else { x3264 = false; } int32_t x3269 = x3235 / 1; int32_t x3270 = x3269 + 1; int32_t x3274 = 131072 * x3270; int32_t x3275 = x3274 * x3270; int32_t x3271 = x3270 * x3270; int32_t x3272 = 2048 * x3271; int32_t x3273 = 64 * x3272; bool x3292 = x3127 == 1; bool x3293 = x3127 == x3270; bool x3294 = x3292 || x3293; bool x3295; if (x3294) { x3295 = x3294; } else { x3295 = false; } bool x3310 = x3270 >= 1; bool x3311; if (x3310) { x3311 = x3310; } else { x3311 = false; } int32_t x3316 = x3269 / 1; int32_t x3317 = x3316 + 1; int32_t x3321 = 32768 * x3317; int32_t x3322 = x3321 * x3317; int32_t x3318 = x3317 * x3317; int32_t x3319 = 512 * x3318; int32_t x3320 = 64 * x3319; int32_t x3344 = x3317 + 2; bool x3345 = x3344 >= 3; bool x3346; if (x3345) { x3346 = x3345; } else { x3346 = false; } int32_t x3351 = x3344 - 3; int32_t x3352 = x3351 / 1; int32_t x3353 = x3352 + 1; int32_t x3357 = 32768 * x3353; int32_t x3358 = x3357 * x3353; int32_t x3354 = x3353 * x3353; int32_t x3355 = 512 * x3354; int32_t x3356 = 64 * x3355; bool x3380 = x3353 >= 1; bool x3381; if (x3380) { x3381 = x3380; } else { x3381 = false; } int32_t x3386 = x3352 / 1; int32_t x3387 = x3386 + 1; int32_t x3391 = 131072 * x3387; int32_t x3392 = x3391 * x3387; int32_t x3388 = x3387 * x3387; int32_t x3389 = 2048 * x3388; int32_t x3390 = 64 * x3389; bool x3409 = x3270 == 1; bool x3410 = x3270 == x3387; bool x3411 = x3409 || x3410; bool x3412; if (x3411) { x3412 = x3411; } else { x3412 = false; } bool x3427 = x3387 >= 2; bool x3428; if (x3427) { x3428 = x3427; } else { x3428 = false; } int32_t x3437 = x3387 - 2; int32_t x3438 = x3437 / 1; int32_t x3439 = x3438 + 1; int32_t x3443 = 131072 * x3439; int32_t x3444 = x3443 * x3439; int32_t x3440 = x3439 * 
x3439; int32_t x3441 = 2048 * x3440; int32_t x3442 = 64 * x3441; bool x3700 = x3387 == x3270; bool x3701; if (x3700) { x3701 = x3700; } else { x3701 = false; } bool x3702 = x3387 == 1; bool x3703 = x3702 || x3700; bool x3704; if (x3703) { x3704 = x3703; } else { x3704 = false; } bool x3771 = x3270 == x3127; bool x3772; if (x3771) { x3772 = x3771; } else { x3772 = false; } bool x3773 = x3409 || x3771; bool x3774; if (x3773) { x3774 = x3773; } else { x3774 = false; } bool x3841 = x3127 == x3153; bool x3842; if (x3841) { x3842 = x3841; } else { x3842 = false; } bool x3843 = x3292 || x3841; bool x3844; if (x3843) { x3844 = x3843; } else { x3844 = false; } bool x3923 = x3010 == x2893; bool x3924; if (x3923) { x3924 = x3923; } else { x3924 = false; } bool x3925 = x3010 == 1; bool x3926 = x3925 || x3923; bool x3927; if (x3926) { x3927 = x3926; } else { x3927 = false; } bool x3994 = x2893 == x2776; bool x3995; if (x3994) { x3995 = x3994; } else { x3995 = false; } bool x3996 = x3032 || x3994; bool x3997; if (x3996) { x3997 = x3996; } else { x3997 = false; } bool x4064 = x2776 == x2659; bool x4065; if (x4064) { x4065 = x4064; } else { x4065 = false; } bool x4066 = x2915 || x4064; bool x4067; if (x4066) { x4067 = x4066; } else { x4067 = false; } bool x4134 = x2659 == x2542; bool x4135; if (x4134) { x4135 = x4134; } else { x4135 = false; } bool x4136 = x2798 || x4134; bool x4137; if (x4136) { x4137 = x4136; } else { x4137 = false; } bool x4204 = x2542 == x2399; bool x4205; if (x4204) { x4205 = x4204; } else { x4205 = false; } bool x4206 = x2681 || x4204; bool x4207; if (x4206) { x4207 = x4206; } else { x4207 = false; } bool x4274 = x2399 == x2425; bool x4275; if (x4274) { x4275 = x4274; } else { x4275 = false; } bool x4276 = x2564 || x4274; bool x4277; if (x4276) { x4277 = x4276; } else { x4277 = false; } bool x4356 = x2282 == x2165; bool x4357; if (x4356) { x4357 = x4356; } else { x4357 = false; } bool x4358 = x2282 == 1; bool x4359 = x4358 || x4356; bool x4360; if (x4359) { x4360 = x4359; } else { x4360 = false; } bool x4427 = x2165 == x2048; bool x4428; if (x4427) { x4428 = x4427; } else { x4428 = false; } bool x4429 = x2304 || x4427; bool x4430; if (x4429) { x4430 = x4429; } else { x4430 = false; } bool x4497 = x2048 == x1905; bool x4498; if (x4497) { x4498 = x4497; } else { x4498 = false; } bool x4499 = x2187 || x4497; bool x4500; if (x4499) { x4500 = x4499; } else { x4500 = false; } bool x4567 = x1905 == x1931; bool x4568; if (x4567) { x4568 = x4567; } else { x4568 = false; } bool x4569 = x2070 || x4567; bool x4570; if (x4569) { x4570 = x4569; } else { x4570 = false; } bool x4649 = x1788 == x1671; bool x4650; if (x4649) { x4650 = x4649; } else { x4650 = false; } bool x4651 = x1788 == 1; bool x4652 = x4651 || x4649; bool x4653; if (x4652) { x4653 = x4652; } else { x4653 = false; } bool x4720 = x1671 == x1531; bool x4721; if (x4720) { x4721 = x4720; } else { x4721 = false; } bool x4722 = x1810 || x4720; bool x4723; if (x4722) { x4723 = x4722; } else { x4723 = false; } bool x4790 = x1531 == x1461; bool x4791; if (x4790) { x4791 = x4790; } else { x4791 = false; } bool x4792 = x1693 || x4790; bool x4793; if (x4792) { x4793 = x4792; } else { x4793 = false; } int32_t x6494 = x1394 / 10; double x6499 = (double)x11; int64_t x6525 = (int64_t)x11; float x6529 = (float)x11; for(int x1386=0; x1386 < 4; x1386++) { struct timeval begin_1, end_1, diff_1; float x1388 = 0.0f; float x1389 = x1388; float x1390 = x1389; int32_t x1391 = x1386 + 1; printf("Start training epoch %d\n",x1391); gettimeofday(&begin_1, 
NULL); for(int x1396=0; x1396 < x1394; x1396++) { int32_t x1397 = x1396 * 64; int32_t x1398 = x1397 * 3072; float* x1399 = x13+x1398; int* x1400 = x14+x1397; // Tensor 'toGPU' invocation. float* x1402 = (float*)myGpuMalloc(196608 * sizeof(float)); CUDA_CALL(hipMemcpy(x1402, x1399, 196608 * sizeof(float), hipMemcpyHostToDevice)); float* x1404 = (float*)myGpuMalloc(2 * sizeof(float)); int* x1405 = (int32_t*)myGpuMalloc(64 * sizeof(int32_t)); CUDA_CALL(hipMemcpy(x1405, x1400, 64 * sizeof(int32_t), hipMemcpyHostToDevice)); float* x1407 = (float*)myGpuMalloc(1 * sizeof(float)); float* x1408 = (float*)myGpuMalloc(1 * sizeof(float)); // allocate memory to save the final loss in CPU Tensor float* x1410 = (float*)myGpuMalloc(1 * sizeof(float)); float* x1418 = (float*)myGpuMalloc(x1417 * sizeof(float)); float* x1419 = (float*)myMalloc(1 * sizeof(float));; x1419[0] = 0.0f; float* x1421 = (float*)myMalloc(1 * sizeof(float));; x1421[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 3, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
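// First convolution of the forward pass (annotation): the minibatch x1402 is
// 64 x 3 x 32 x 32 in NCHW layout (196608 floats copied per iteration), the
// filter x751 is 64 x 3 x 3 x 3 with pad 1 / stride 1, so the output written
// to x1418 is 64 x 64 x x1412 x x1412. The integer arithmetic before the
// epoch loop precomputes these spatial sizes with the usual convolution
// output formula,
//     out = (in + 2*pad - kernel) / stride + 1,
// e.g. x1412 = 31/1 + 1 = 32 here (the constant 31 already folds in in=32,
// pad=1, kernel=3). Every convolution below follows the same pattern: pick an
// algorithm with cudnnGetConvolutionForwardAlgorithm
// (CUDNN_CONVOLUTION_FWD_PREFER_FASTEST), query and allocate a workspace,
// then call cudnnConvolutionForward with alpha = 1 (x1421) and beta = 0 (x1419).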
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1421, in_desc, x1402, filt_desc, x751, conv_desc, algo, ws_data, ws_size, x1419, out_desc, x1418)); }; float* x1424 = (float*)myGpuMalloc(x1417 * sizeof(float)); float* x1425 = (float*)myGpuMalloc(x1415 * sizeof(float)); float* x1426 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1427 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1428 = (float*)myMalloc(1 * sizeof(float));; x1428[0] = 0.0f; float* x1430 = (float*)myMalloc(1 * sizeof(float));; x1430[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1430, x1428, in_desc, x1418, out_desc, x1425, sbmv_desc, x913, x1048, 0.1, x415, x625, 1.0E-5, x1426, x1427)); }; float* x1433 = (float*)myGpuMalloc(x1417 * sizeof(float)); float* x1434 = (float*)myMalloc(1 * sizeof(float));; x1434[0] = 0.0f; float* x1436 = (float*)myMalloc(1 * sizeof(float));; x1436[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1436, x_desc, x1425, x1434, x_desc, x1425)); }; float* x1439 = (float*)myMalloc(1 * sizeof(float));; x1439[0] = 0.0f; float* x1441 = (float*)myMalloc(1 * sizeof(float));; x1441[0] = 1.0f; float* x1451 = (float*)myGpuMalloc(x1450 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x1441, in_desc, x1425, x1439, out_desc, x1451)); }; float* x1453 = (float*)myGpuMalloc(x1450 * sizeof(float)); if (x1455) { } else { assert(false && "ERROR not specified"); } float* x1467 = (float*)myGpuMalloc(x1466 * sizeof(float)); float* x1468 = (float*)myMalloc(1 * sizeof(float));; x1468[0] = 0.0f; float* x1470 = (float*)myMalloc(1 * sizeof(float));; x1470[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnFilterDescriptor_t filt_desc; 
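// End of the stem (annotation): convolution -> batch norm
// (CUDNN_BATCHNORM_SPATIAL, running statistics updated with factor 0.1,
// epsilon 1e-5, saved mean / inverse variance kept in x1426/x1427 for the
// backward pass) -> in-place ReLU -> 2x2 max pooling with stride 2, which
// halves the spatial size to x1445 and leaves the pooled activations in
// x1451. The descriptors being set up here begin the first bottleneck block:
// a 1x1, 64 -> 64 convolution (weights x994) applied to x1451.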
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1470, in_desc, x1451, filt_desc, x994, conv_desc, algo, ws_data, ws_size, x1468, out_desc, x1467)); }; float* x1473 = (float*)myGpuMalloc(x1466 * sizeof(float)); float* x1474 = (float*)myGpuMalloc(x1464 * sizeof(float)); float* x1475 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1476 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1477 = (float*)myMalloc(1 * sizeof(float));; x1477[0] = 0.0f; float* x1479 = (float*)myMalloc(1 * sizeof(float));; x1479[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1479, x1477, in_desc, x1467, out_desc, x1474, sbmv_desc, x373, x454, 0.1, x637, x448, 1.0E-5, x1475, x1476)); }; float* x1482 = (float*)myGpuMalloc(x1466 * sizeof(float)); float* x1483 = (float*)myMalloc(1 * sizeof(float));; x1483[0] = 0.0f; float* x1485 = (float*)myMalloc(1 * sizeof(float));; x1485[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1485, x_desc, x1474, x1483, x_desc, x1474)); }; if (x1490) { } else { assert(false && "ERROR not specified"); } float* x1503 = (float*)myGpuMalloc(x1502 * sizeof(float)); float* x1504 = (float*)myMalloc(1 * sizeof(float));; x1504[0] = 0.0f; float* x1506 = (float*)myMalloc(1 * sizeof(float));; x1506[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); 
cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1506, in_desc, x1474, filt_desc, x565, conv_desc, algo, ws_data, ws_size, x1504, out_desc, x1503)); }; float* x1509 = (float*)myGpuMalloc(x1502 * sizeof(float)); float* x1510 = (float*)myGpuMalloc(x1500 * sizeof(float)); float* x1511 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1512 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1513 = (float*)myMalloc(1 * sizeof(float));; x1513[0] = 0.0f; float* x1515 = (float*)myMalloc(1 * sizeof(float));; x1515[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1515, x1513, in_desc, x1503, out_desc, x1510, sbmv_desc, x787, x442, 0.1, x610, x769, 1.0E-5, x1511, x1512)); }; float* x1518 = (float*)myGpuMalloc(x1502 * sizeof(float)); float* x1519 = (float*)myMalloc(1 * sizeof(float));; x1519[0] = 0.0f; float* x1521 = (float*)myMalloc(1 * sizeof(float));; x1521[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1521, x_desc, x1510, x1519, x_desc, x1510)); }; if (x1525) { } else { assert(false && "ERROR not specified"); } float* x1537 = (float*)myGpuMalloc(x1536 * sizeof(float)); float* x1538 = (float*)myMalloc(1 * sizeof(float));; x1538[0] = 0.0f; float* x1540 = (float*)myMalloc(1 * sizeof(float));; x1540[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1540, in_desc, x1510, filt_desc, x391, conv_desc, algo, ws_data, ws_size, x1538, out_desc, x1537)); }; float* x1543 = (float*)myGpuMalloc(x1536 * sizeof(float)); float* x1544 = (float*)myGpuMalloc(x1534 * sizeof(float)); float* x1545 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1546 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1547 = (float*)myMalloc(1 * sizeof(float));; x1547[0] = 0.0f; float* x1549 = (float*)myMalloc(1 * sizeof(float));; x1549[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1549, x1547, in_desc, x1537, out_desc, x1544, sbmv_desc, x892, x673, 0.1, x508, x403, 1.0E-5, x1545, x1546)); }; float* x1552 = (float*)myGpuMalloc(x1536 * sizeof(float)); if (x1455) { } else { assert(false && "ERROR not specified"); } float* x1560 = (float*)myGpuMalloc(x1559 * sizeof(float)); float* x1561 = (float*)myMalloc(1 * sizeof(float));; x1561[0] = 0.0f; float* x1563 = (float*)myMalloc(1 * sizeof(float));; x1563[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, 
CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1563, in_desc, x1451, filt_desc, x781, conv_desc, algo, ws_data, ws_size, x1561, out_desc, x1560)); }; float* x1566 = (float*)myGpuMalloc(x1559 * sizeof(float)); float* x1567 = (float*)myGpuMalloc(x1557 * sizeof(float)); float* x1568 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1569 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1570 = (float*)myMalloc(1 * sizeof(float));; x1570[0] = 0.0f; float* x1572 = (float*)myMalloc(1 * sizeof(float));; x1572[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1572, x1570, in_desc, x1560, out_desc, x1567, sbmv_desc, x523, x904, 0.1, x1087, x1024, 1.0E-5, x1568, x1569)); }; float* x1575 = (float*)myGpuMalloc(x1559 * sizeof(float)); if (x1579) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1461) x Sym(1461), res: x Const(64) x Const(256) x Sym(1531) x Sym(1531)"); } float* x1584 = (float*)myMalloc(1 * sizeof(float));; x1584[0] = 1.0f; float* x1586 = (float*)myMalloc(1 * sizeof(float));; x1586[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1584, bias_desc, x1567, x1586, out_desc, x1544)); }; float* x1589 = (float*)myMalloc(1 * sizeof(float));; x1589[0] = 0.0f; float* x1591 = (float*)myMalloc(1 * sizeof(float));; x1591[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1591, x_desc, x1544, x1589, x_desc, x1544)); }; if (x1595) { } else { assert(false && "ERROR not specified"); } float* x1607 = (float*)myGpuMalloc(x1606 * sizeof(float)); float* x1608 = (float*)myMalloc(1 * 
sizeof(float));; x1608[0] = 0.0f; float* x1610 = (float*)myMalloc(1 * sizeof(float));; x1610[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1610, in_desc, x1544, filt_desc, x808, conv_desc, algo, ws_data, ws_size, x1608, out_desc, x1607)); }; float* x1613 = (float*)myGpuMalloc(x1606 * sizeof(float)); float* x1614 = (float*)myGpuMalloc(x1604 * sizeof(float)); float* x1615 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1616 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1617 = (float*)myMalloc(1 * sizeof(float));; x1617[0] = 0.0f; float* x1619 = (float*)myMalloc(1 * sizeof(float));; x1619[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1619, x1617, in_desc, x1607, out_desc, x1614, sbmv_desc, x721, x475, 0.1, x325, x601, 1.0E-5, x1615, x1616)); }; float* x1622 = (float*)myGpuMalloc(x1606 * sizeof(float)); float* x1623 = (float*)myMalloc(1 * sizeof(float));; x1623[0] = 0.0f; float* x1625 = (float*)myMalloc(1 * sizeof(float));; x1625[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1625, x_desc, x1614, x1623, x_desc, x1614)); }; if (x1630) { } else { assert(false && "ERROR not specified"); } float* x1643 = (float*)myGpuMalloc(x1642 * sizeof(float)); 
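// Second bottleneck of this stage (annotation): the 1x1, 256 -> 64 reduction
// (weights x808) with its batch norm and ReLU is complete above. The lines
// below run the 3x3, 64 -> 64 convolution (weights x544) and the 1x1,
// 64 -> 256 expansion (weights x685), each again followed by batch norm.
// The first block used a 1x1 projection shortcut (weights x781) from the
// pooled input; this block instead adds its own input x1544 directly with
// cudnnAddTensor before the closing ReLU. This reduce / 3x3 / expand /
// shortcut-add sequence is the pattern repeated by every residual block in
// the generated model.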
float* x1644 = (float*)myMalloc(1 * sizeof(float));; x1644[0] = 0.0f; float* x1646 = (float*)myMalloc(1 * sizeof(float));; x1646[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1646, in_desc, x1614, filt_desc, x544, conv_desc, algo, ws_data, ws_size, x1644, out_desc, x1643)); }; float* x1649 = (float*)myGpuMalloc(x1642 * sizeof(float)); float* x1650 = (float*)myGpuMalloc(x1640 * sizeof(float)); float* x1651 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1652 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1653 = (float*)myMalloc(1 * sizeof(float));; x1653[0] = 0.0f; float* x1655 = (float*)myMalloc(1 * sizeof(float));; x1655[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1655, x1653, in_desc, x1643, out_desc, x1650, sbmv_desc, x919, x754, 0.1, x427, x1027, 1.0E-5, x1651, x1652)); }; float* x1658 = (float*)myGpuMalloc(x1642 * sizeof(float)); float* x1659 = (float*)myMalloc(1 * sizeof(float));; x1659[0] = 0.0f; float* x1661 = (float*)myMalloc(1 * sizeof(float));; x1661[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1661, x_desc, x1650, x1659, x_desc, x1650)); }; if (x1665) { } else { assert(false && "ERROR not specified"); } float* x1677 = 
(float*)myGpuMalloc(x1676 * sizeof(float)); float* x1678 = (float*)myMalloc(1 * sizeof(float));; x1678[0] = 0.0f; float* x1680 = (float*)myMalloc(1 * sizeof(float));; x1680[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1680, in_desc, x1650, filt_desc, x685, conv_desc, algo, ws_data, ws_size, x1678, out_desc, x1677)); }; float* x1683 = (float*)myGpuMalloc(x1676 * sizeof(float)); float* x1684 = (float*)myGpuMalloc(x1674 * sizeof(float)); float* x1685 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1686 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1687 = (float*)myMalloc(1 * sizeof(float));; x1687[0] = 0.0f; float* x1689 = (float*)myMalloc(1 * sizeof(float));; x1689[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1689, x1687, in_desc, x1677, out_desc, x1684, sbmv_desc, x469, x316, 0.1, x568, x793, 1.0E-5, x1685, x1686)); }; float* x1692 = (float*)myGpuMalloc(x1676 * sizeof(float)); if (x1696) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1531) x Sym(1531), res: x Const(64) x Const(256) x Sym(1671) x Sym(1671)"); } float* x1701 = (float*)myMalloc(1 * sizeof(float));; x1701[0] = 1.0f; float* x1703 = (float*)myMalloc(1 * sizeof(float));; x1703[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1701, bias_desc, x1544, x1703, out_desc, x1684)); }; float* x1706 = (float*)myMalloc(1 * sizeof(float));; x1706[0] = 0.0f; float* x1708 = (float*)myMalloc(1 * sizeof(float));; x1708[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1708, x_desc, x1684, x1706, x_desc, x1684)); }; if (x1712) { } else { assert(false && "ERROR not specified"); } float* x1724 = (float*)myGpuMalloc(x1723 * sizeof(float)); float* x1725 = (float*)myMalloc(1 * sizeof(float));; x1725[0] = 0.0f; float* x1727 = (float*)myMalloc(1 * sizeof(float));; x1727[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
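// 1x1 reduction (256 -> 64 channels, weights x745) applied to x1684, the output of the previous residual block.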
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1727, in_desc, x1684, filt_desc, x745, conv_desc, algo, ws_data, ws_size, x1725, out_desc, x1724)); }; float* x1730 = (float*)myGpuMalloc(x1723 * sizeof(float)); float* x1731 = (float*)myGpuMalloc(x1721 * sizeof(float)); float* x1732 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1733 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1734 = (float*)myMalloc(1 * sizeof(float));; x1734[0] = 0.0f; float* x1736 = (float*)myMalloc(1 * sizeof(float));; x1736[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1736, x1734, in_desc, x1724, out_desc, x1731, sbmv_desc, x538, x367, 0.1, x1066, x856, 1.0E-5, x1732, x1733)); }; float* x1739 = (float*)myGpuMalloc(x1723 * sizeof(float)); float* x1740 = (float*)myMalloc(1 * sizeof(float));; x1740[0] = 0.0f; float* x1742 = (float*)myMalloc(1 * sizeof(float));; x1742[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1742, x_desc, x1731, x1740, x_desc, x1731)); }; if (x1747) { } else { assert(false && "ERROR not specified"); } float* x1760 = (float*)myGpuMalloc(x1759 * sizeof(float)); float* x1761 = (float*)myMalloc(1 * sizeof(float));; x1761[0] = 0.0f; float* x1763 = (float*)myMalloc(1 * sizeof(float));; x1763[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1763, in_desc, x1731, filt_desc, x514, conv_desc, algo, ws_data, ws_size, x1761, out_desc, x1760)); }; float* x1766 = (float*)myGpuMalloc(x1759 * sizeof(float)); float* x1767 = (float*)myGpuMalloc(x1757 * sizeof(float)); float* x1768 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1769 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1770 = (float*)myMalloc(1 * sizeof(float));; x1770[0] = 0.0f; float* x1772 = (float*)myMalloc(1 * sizeof(float));; x1772[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1772, x1770, in_desc, x1760, out_desc, x1767, sbmv_desc, x511, x700, 0.1, x832, x649, 1.0E-5, x1768, x1769)); }; float* x1775 = (float*)myGpuMalloc(x1759 * sizeof(float)); float* x1776 = (float*)myMalloc(1 * sizeof(float));; x1776[0] = 0.0f; float* x1778 = (float*)myMalloc(1 * sizeof(float));; x1778[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1778, x_desc, x1767, x1776, x_desc, x1767)); }; if (x1782) { } else { assert(false && "ERROR not specified"); } float* x1794 = (float*)myGpuMalloc(x1793 * sizeof(float)); float* x1795 = (float*)myMalloc(1 * sizeof(float));; x1795[0] = 0.0f; float* x1797 = (float*)myMalloc(1 * sizeof(float));; x1797[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1797, in_desc, x1767, filt_desc, x556, conv_desc, algo, ws_data, ws_size, x1795, out_desc, x1794)); }; float* x1800 = (float*)myGpuMalloc(x1793 * sizeof(float)); float* x1801 = (float*)myGpuMalloc(x1791 * sizeof(float)); float* x1802 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1803 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1804 = (float*)myMalloc(1 * sizeof(float));; x1804[0] = 0.0f; float* x1806 = (float*)myMalloc(1 * sizeof(float));; x1806[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1806, x1804, in_desc, x1794, out_desc, x1801, sbmv_desc, x406, x1036, 0.1, x847, x694, 1.0E-5, x1802, x1803)); }; float* x1809 = (float*)myGpuMalloc(x1793 * sizeof(float)); if (x1813) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1671) x Sym(1671), res: x Const(64) x Const(256) x Sym(1788) x Sym(1788)"); } float* x1818 = (float*)myMalloc(1 * sizeof(float));; x1818[0] = 1.0f; float* x1820 = (float*)myMalloc(1 * sizeof(float));; x1820[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1818, bias_desc, x1684, x1820, out_desc, x1801)); }; float* x1823 = (float*)myMalloc(1 * sizeof(float));; x1823[0] = 0.0f; float* x1825 = (float*)myMalloc(1 * sizeof(float));; x1825[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1825, x_desc, x1801, x1823, x_desc, x1801)); }; if (x1829) { } else { assert(false && "ERROR not specified"); } float* x1841 = (float*)myGpuMalloc(x1840 * sizeof(float)); float* x1842 = (float*)myMalloc(1 * sizeof(float));; x1842[0] = 0.0f; float* x1844 = (float*)myMalloc(1 * sizeof(float));; x1844[0] = 1.0f; { 
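// First 1x1 convolution of the next (128-channel) stage: 256 -> 128 channels (weights x328) on x1801; batch norm and ReLU follow.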
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1844, in_desc, x1801, filt_desc, x328, conv_desc, algo, ws_data, ws_size, x1842, out_desc, x1841)); }; float* x1847 = (float*)myGpuMalloc(x1840 * sizeof(float)); float* x1848 = (float*)myGpuMalloc(x1838 * sizeof(float)); float* x1849 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1850 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1851 = (float*)myMalloc(1 * sizeof(float));; x1851[0] = 0.0f; float* x1853 = (float*)myMalloc(1 * sizeof(float));; x1853[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1853, x1851, in_desc, x1841, out_desc, x1848, sbmv_desc, x547, x811, 0.1, x907, x697, 1.0E-5, x1849, x1850)); }; float* x1856 = (float*)myGpuMalloc(x1840 * sizeof(float)); float* x1857 = (float*)myMalloc(1 * sizeof(float));; x1857[0] = 0.0f; float* x1859 = (float*)myMalloc(1 * sizeof(float));; x1859[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1859, x_desc, x1848, x1857, x_desc, x1848)); }; if (x1864) { } else { assert(false && "ERROR not specified"); } float* x1877 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1878 = (float*)myMalloc(1 * sizeof(float));; x1878[0] = 0.0f; float* x1880 = 
(float*)myMalloc(1 * sizeof(float));; x1880[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1880, in_desc, x1848, filt_desc, x376, conv_desc, algo, ws_data, ws_size, x1878, out_desc, x1877)); }; float* x1883 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1884 = (float*)myGpuMalloc(x1874 * sizeof(float)); float* x1885 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1886 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1887 = (float*)myMalloc(1 * sizeof(float));; x1887[0] = 0.0f; float* x1889 = (float*)myMalloc(1 * sizeof(float));; x1889[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1889, x1887, in_desc, x1877, out_desc, x1884, sbmv_desc, x1051, x865, 0.1, x679, x424, 1.0E-5, x1885, x1886)); }; float* x1892 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1893 = (float*)myMalloc(1 * sizeof(float));; x1893[0] = 0.0f; float* x1895 = (float*)myMalloc(1 * sizeof(float));; x1895[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1895, x_desc, x1884, x1893, x_desc, x1884)); }; if (x1899) { } else { assert(false && "ERROR not specified"); } float* x1911 = (float*)myGpuMalloc(x1910 * sizeof(float)); float* x1912 = (float*)myMalloc(1 * 
sizeof(float));; x1912[0] = 0.0f; float* x1914 = (float*)myMalloc(1 * sizeof(float));; x1914[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1914, in_desc, x1884, filt_desc, x613, conv_desc, algo, ws_data, ws_size, x1912, out_desc, x1911)); }; float* x1917 = (float*)myGpuMalloc(x1910 * sizeof(float)); float* x1918 = (float*)myGpuMalloc(x1908 * sizeof(float)); float* x1919 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1920 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1921 = (float*)myMalloc(1 * sizeof(float));; x1921[0] = 0.0f; float* x1923 = (float*)myMalloc(1 * sizeof(float));; x1923[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1923, x1921, in_desc, x1911, out_desc, x1918, sbmv_desc, x730, x925, 0.1, x742, x598, 1.0E-5, x1919, x1920)); }; float* x1926 = (float*)myGpuMalloc(x1910 * sizeof(float)); if (x1829) { } else { assert(false && "ERROR not specified"); } float* x1937 = (float*)myGpuMalloc(x1936 * sizeof(float)); float* x1938 = (float*)myMalloc(1 * sizeof(float));; x1938[0] = 0.0f; float* x1940 = (float*)myMalloc(1 * sizeof(float));; x1940[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, 
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1940, in_desc, x1801, filt_desc, x1069, conv_desc, algo, ws_data, ws_size, x1938, out_desc, x1937)); }; float* x1943 = (float*)myGpuMalloc(x1936 * sizeof(float)); float* x1944 = (float*)myGpuMalloc(x1934 * sizeof(float)); float* x1945 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1946 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1947 = (float*)myMalloc(1 * sizeof(float));; x1947[0] = 0.0f; float* x1949 = (float*)myMalloc(1 * sizeof(float));; x1949[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1949, x1947, in_desc, x1937, out_desc, x1944, sbmv_desc, x916, x652, 0.1, x421, x364, 1.0E-5, x1945, x1946)); }; float* x1952 = (float*)myGpuMalloc(x1936 * sizeof(float)); if (x1956) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1931) x Sym(1931), res: x Const(64) x Const(512) x Sym(1905) x Sym(1905)"); } float* x1961 = (float*)myMalloc(1 * sizeof(float));; x1961[0] = 1.0f; float* x1963 = (float*)myMalloc(1 * sizeof(float));; x1963[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1961, bias_desc, x1944, x1963, out_desc, x1918)); }; float* x1966 = (float*)myMalloc(1 * sizeof(float));; x1966[0] = 0.0f; float* x1968 = (float*)myMalloc(1 * sizeof(float));; x1968[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); 
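// In-place ReLU on x1918, the sum of the main path and the strided 1x1 projection shortcut (x1944) added above.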
CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1968, x_desc, x1918, x1966, x_desc, x1918)); }; if (x1972) { } else { assert(false && "ERROR not specified"); } float* x1984 = (float*)myGpuMalloc(x1983 * sizeof(float)); float* x1985 = (float*)myMalloc(1 * sizeof(float));; x1985[0] = 0.0f; float* x1987 = (float*)myMalloc(1 * sizeof(float));; x1987[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1987, in_desc, x1918, filt_desc, x1063, conv_desc, algo, ws_data, ws_size, x1985, out_desc, x1984)); }; float* x1990 = (float*)myGpuMalloc(x1983 * sizeof(float)); float* x1991 = (float*)myGpuMalloc(x1981 * sizeof(float)); float* x1992 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1993 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1994 = (float*)myMalloc(1 * sizeof(float));; x1994[0] = 0.0f; float* x1996 = (float*)myMalloc(1 * sizeof(float));; x1996[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1996, x1994, in_desc, x1984, out_desc, x1991, sbmv_desc, x961, x346, 0.1, x595, x826, 1.0E-5, x1992, x1993)); }; float* x1999 = (float*)myGpuMalloc(x1983 * sizeof(float)); float* x2000 = (float*)myMalloc(1 * sizeof(float));; x2000[0] = 0.0f; float* x2002 = (float*)myMalloc(1 * sizeof(float));; x2002[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, 
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2002, x_desc, x1991, x2000, x_desc, x1991)); }; if (x2007) { } else { assert(false && "ERROR not specified"); } float* x2020 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2021 = (float*)myMalloc(1 * sizeof(float));; x2021[0] = 0.0f; float* x2023 = (float*)myMalloc(1 * sizeof(float));; x2023[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2023, in_desc, x1991, filt_desc, x1000, conv_desc, algo, ws_data, ws_size, x2021, out_desc, x2020)); }; float* x2026 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2027 = (float*)myGpuMalloc(x2017 * sizeof(float)); float* x2028 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2029 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2030 = (float*)myMalloc(1 * sizeof(float));; x2030[0] = 0.0f; float* x2032 = (float*)myMalloc(1 * sizeof(float));; x2032[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2032, x2030, in_desc, x2020, out_desc, x2027, sbmv_desc, x319, x580, 0.1, x400, x970, 1.0E-5, x2028, x2029)); }; float* x2035 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2036 = (float*)myMalloc(1 * sizeof(float));; x2036[0] = 0.0f; float* x2038 = (float*)myMalloc(1 * sizeof(float));; x2038[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); 
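// In-place ReLU on x2027, the batch-normalized output of the preceding 3x3 convolution.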
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2038, x_desc, x2027, x2036, x_desc, x2027)); }; if (x2042) { } else { assert(false && "ERROR not specified"); } float* x2054 = (float*)myGpuMalloc(x2053 * sizeof(float)); float* x2055 = (float*)myMalloc(1 * sizeof(float));; x2055[0] = 0.0f; float* x2057 = (float*)myMalloc(1 * sizeof(float));; x2057[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
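// 1x1 expansion (128 -> 512 channels, weights x628); batch norm and the shortcut addition with x1918 follow.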
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2057, in_desc, x2027, filt_desc, x628, conv_desc, algo, ws_data, ws_size, x2055, out_desc, x2054)); }; float* x2060 = (float*)myGpuMalloc(x2053 * sizeof(float)); float* x2061 = (float*)myGpuMalloc(x2051 * sizeof(float)); float* x2062 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2063 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2064 = (float*)myMalloc(1 * sizeof(float));; x2064[0] = 0.0f; float* x2066 = (float*)myMalloc(1 * sizeof(float));; x2066[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2066, x2064, in_desc, x2054, out_desc, x2061, sbmv_desc, x451, x1033, 0.1, x736, x559, 1.0E-5, x2062, x2063)); }; float* x2069 = (float*)myGpuMalloc(x2053 * sizeof(float)); if (x2073) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1905) x Sym(1905), res: x Const(64) x Const(512) x Sym(2048) x Sym(2048)"); } float* x2078 = (float*)myMalloc(1 * sizeof(float));; x2078[0] = 1.0f; float* x2080 = (float*)myMalloc(1 * sizeof(float));; x2080[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2078, bias_desc, x1918, x2080, out_desc, x2061)); }; float* x2083 = (float*)myMalloc(1 * sizeof(float));; x2083[0] = 0.0f; float* x2085 = (float*)myMalloc(1 * sizeof(float));; x2085[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2085, x_desc, x2061, x2083, x_desc, x2061)); }; if (x2089) { } else { assert(false && "ERROR not specified"); } float* x2101 = (float*)myGpuMalloc(x2100 * sizeof(float)); float* x2102 = (float*)myMalloc(1 * sizeof(float));; x2102[0] = 0.0f; float* x2104 = (float*)myMalloc(1 * sizeof(float));; x2104[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); 
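// 1x1 reduction (512 -> 128 channels, weights x883) on x2061; output descriptor, algorithm choice, and workspace setup follow.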
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2104, in_desc, x2061, filt_desc, x883, conv_desc, algo, ws_data, ws_size, x2102, out_desc, x2101)); }; float* x2107 = (float*)myGpuMalloc(x2100 * sizeof(float)); float* x2108 = (float*)myGpuMalloc(x2098 * sizeof(float)); float* x2109 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2110 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2111 = (float*)myMalloc(1 * sizeof(float));; x2111[0] = 0.0f; float* x2113 = (float*)myMalloc(1 * sizeof(float));; x2113[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2113, x2111, in_desc, x2101, out_desc, x2108, sbmv_desc, x430, x805, 0.1, x631, x322, 1.0E-5, x2109, x2110)); }; float* x2116 = (float*)myGpuMalloc(x2100 * sizeof(float)); float* x2117 = (float*)myMalloc(1 * sizeof(float));; x2117[0] = 0.0f; float* x2119 = (float*)myMalloc(1 * sizeof(float));; x2119[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2119, x_desc, x2108, x2117, x_desc, x2108)); }; if (x2124) { } else { assert(false && "ERROR not specified"); } float* x2137 = (float*)myGpuMalloc(x2136 * sizeof(float)); float* x2138 = (float*)myMalloc(1 * sizeof(float));; x2138[0] = 0.0f; float* x2140 = (float*)myMalloc(1 * sizeof(float));; x2140[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2140, in_desc, x2108, filt_desc, x868, conv_desc, algo, ws_data, ws_size, x2138, out_desc, x2137)); }; float* x2143 = (float*)myGpuMalloc(x2136 * sizeof(float)); float* x2144 = (float*)myGpuMalloc(x2134 * sizeof(float)); float* x2145 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2146 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2147 = (float*)myMalloc(1 * sizeof(float));; x2147[0] = 0.0f; float* x2149 = (float*)myMalloc(1 * sizeof(float));; x2149[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2149, x2147, in_desc, x2137, out_desc, x2144, sbmv_desc, x676, x478, 0.1, x946, x1093, 1.0E-5, x2145, x2146)); }; float* x2152 = (float*)myGpuMalloc(x2136 * sizeof(float)); float* x2153 = (float*)myMalloc(1 * sizeof(float));; x2153[0] = 0.0f; float* x2155 = (float*)myMalloc(1 * sizeof(float));; x2155[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2155, x_desc, x2144, x2153, x_desc, x2144)); }; if (x2159) { } else { assert(false && "ERROR not specified"); } float* x2171 = (float*)myGpuMalloc(x2170 * sizeof(float)); float* x2172 = (float*)myMalloc(1 * sizeof(float));; x2172[0] = 0.0f; float* x2174 = (float*)myMalloc(1 * sizeof(float));; x2174[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
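// 1x1 expansion back to 512 channels (weights x418); after batch norm, the previous block's output x2061 is added in as the skip connection.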
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2174, in_desc, x2144, filt_desc, x418, conv_desc, algo, ws_data, ws_size, x2172, out_desc, x2171)); }; float* x2177 = (float*)myGpuMalloc(x2170 * sizeof(float)); float* x2178 = (float*)myGpuMalloc(x2168 * sizeof(float)); float* x2179 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2180 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2181 = (float*)myMalloc(1 * sizeof(float));; x2181[0] = 0.0f; float* x2183 = (float*)myMalloc(1 * sizeof(float));; x2183[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2183, x2181, in_desc, x2171, out_desc, x2178, sbmv_desc, x796, x541, 0.1, x370, x964, 1.0E-5, x2179, x2180)); }; float* x2186 = (float*)myGpuMalloc(x2170 * sizeof(float)); if (x2190) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2048) x Sym(2048), res: x Const(64) x Const(512) x Sym(2165) x Sym(2165)"); } float* x2195 = (float*)myMalloc(1 * sizeof(float));; x2195[0] = 1.0f; float* x2197 = (float*)myMalloc(1 * sizeof(float));; x2197[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2195, bias_desc, x2061, x2197, out_desc, x2178)); }; float* x2200 = (float*)myMalloc(1 * sizeof(float));; x2200[0] = 0.0f; float* x2202 = (float*)myMalloc(1 * sizeof(float));; x2202[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); 
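// In-place ReLU on x2178, the result of the shortcut addition above.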
cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2202, x_desc, x2178, x2200, x_desc, x2178)); }; if (x2206) { } else { assert(false && "ERROR not specified"); } float* x2218 = (float*)myGpuMalloc(x2217 * sizeof(float)); float* x2219 = (float*)myMalloc(1 * sizeof(float));; x2219[0] = 0.0f; float* x2221 = (float*)myMalloc(1 * sizeof(float));; x2221[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
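// 1x1 reduction (512 -> 128 channels, weights x691) opening the next bottleneck block.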
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2221, in_desc, x2178, filt_desc, x691, conv_desc, algo, ws_data, ws_size, x2219, out_desc, x2218)); }; float* x2224 = (float*)myGpuMalloc(x2217 * sizeof(float)); float* x2225 = (float*)myGpuMalloc(x2215 * sizeof(float)); float* x2226 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2227 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2228 = (float*)myMalloc(1 * sizeof(float));; x2228[0] = 0.0f; float* x2230 = (float*)myMalloc(1 * sizeof(float));; x2230[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2230, x2228, in_desc, x2218, out_desc, x2225, sbmv_desc, x412, x1021, 0.1, x1003, x1078, 1.0E-5, x2226, x2227)); }; float* x2233 = (float*)myGpuMalloc(x2217 * sizeof(float)); float* x2234 = (float*)myMalloc(1 * sizeof(float));; x2234[0] = 0.0f; float* x2236 = (float*)myMalloc(1 * sizeof(float));; x2236[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2236, x_desc, x2225, x2234, x_desc, x2225)); }; if (x2241) { } else { assert(false && "ERROR not specified"); } float* x2254 = (float*)myGpuMalloc(x2253 * sizeof(float)); float* x2255 = (float*)myMalloc(1 * sizeof(float));; x2255[0] = 0.0f; float* x2257 = (float*)myMalloc(1 * sizeof(float));; x2257[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2257, in_desc, x2225, filt_desc, x790, conv_desc, algo, ws_data, ws_size, x2255, out_desc, x2254)); }; float* x2260 = (float*)myGpuMalloc(x2253 * sizeof(float)); float* x2261 = (float*)myGpuMalloc(x2251 * sizeof(float)); float* x2262 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2263 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2264 = (float*)myMalloc(1 * sizeof(float));; x2264[0] = 0.0f; float* x2266 = (float*)myMalloc(1 * sizeof(float));; x2266[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2266, x2264, in_desc, x2254, out_desc, x2261, sbmv_desc, x532, x409, 0.1, x1099, x739, 1.0E-5, x2262, x2263)); }; float* x2269 = (float*)myGpuMalloc(x2253 * sizeof(float)); float* x2270 = (float*)myMalloc(1 * sizeof(float));; x2270[0] = 0.0f; float* x2272 = (float*)myMalloc(1 * sizeof(float));; x2272[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2272, x_desc, x2261, x2270, x_desc, x2261)); }; if (x2276) { } else { assert(false && "ERROR not specified"); } float* x2288 = (float*)myGpuMalloc(x2287 * sizeof(float)); float* x2289 = (float*)myMalloc(1 * sizeof(float));; x2289[0] = 0.0f; float* x2291 = (float*)myMalloc(1 * sizeof(float));; x2291[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
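// Editor's note (sketch): ReLU is applied in place throughout this file -- the
// same device buffer is passed as both input and output of
// cudnnActivationForward with alpha = 1, beta = 0, so no separate activation
// buffer is needed. Hypothetical helper following that pattern; never invoked.
auto reluInPlaceSketch = [&](cudnnTensorDescriptor_t xDesc, float* x) {
  float one = 1.0f, zero = 0.0f;
  cudnnActivationDescriptor_t relu;
  CUDNN_CALL(cudnnCreateActivationDescriptor(&relu));
  CUDNN_CALL(cudnnSetActivationDescriptor(
      relu, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, /*coef=*/0.0));
  CUDNN_CALL(cudnnActivationForward(
      cudnnHandle, relu, &one, xDesc, x, &zero, xDesc, x));  // x = max(x, 0), in place
};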
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2291, in_desc, x2261, filt_desc, x460, conv_desc, algo, ws_data, ws_size, x2289, out_desc, x2288)); }; float* x2294 = (float*)myGpuMalloc(x2287 * sizeof(float)); float* x2295 = (float*)myGpuMalloc(x2285 * sizeof(float)); float* x2296 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2297 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2298 = (float*)myMalloc(1 * sizeof(float));; x2298[0] = 0.0f; float* x2300 = (float*)myMalloc(1 * sizeof(float));; x2300[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2300, x2298, in_desc, x2288, out_desc, x2295, sbmv_desc, x763, x457, 0.1, x352, x997, 1.0E-5, x2296, x2297)); }; float* x2303 = (float*)myGpuMalloc(x2287 * sizeof(float)); if (x2307) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2165) x Sym(2165), res: x Const(64) x Const(512) x Sym(2282) x Sym(2282)"); } float* x2312 = (float*)myMalloc(1 * sizeof(float));; x2312[0] = 1.0f; float* x2314 = (float*)myMalloc(1 * sizeof(float));; x2314[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2312, bias_desc, x2178, x2314, out_desc, x2295)); }; float* x2317 = (float*)myMalloc(1 * sizeof(float));; x2317[0] = 0.0f; float* x2319 = (float*)myMalloc(1 * sizeof(float));; x2319[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2319, x_desc, x2295, x2317, x_desc, x2295)); }; if (x2323) { } else { assert(false && "ERROR not specified"); } float* x2335 = (float*)myGpuMalloc(x2334 * sizeof(float)); float* x2336 = (float*)myMalloc(1 * sizeof(float));; x2336[0] = 0.0f; float* x2338 = (float*)myMalloc(1 * sizeof(float));; x2338[0] = 1.0f; { 
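// Editor's note (sketch): the cudnnAddTensor call above, with both scaling
// factors set to 1, appears to be how this generated code realizes the
// ResNet-style skip connection: the tensor that entered the block (x2178) is
// accumulated into the output of the block's last batch norm (x2295) just
// before the final ReLU, and the shape assert in front of it guards that the
// two operands match (or broadcast). Minimal illustration of that call, using
// only names defined in this file; never invoked.
auto residualAddSketch = [&](cudnnTensorDescriptor_t skipDesc, const float* skip,
                             cudnnTensorDescriptor_t outDesc, float* out) {
  float one = 1.0f;
  CUDNN_CALL(cudnnAddTensor(
      cudnnHandle, &one, skipDesc, skip, &one, outDesc, out));  // out += skip
};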
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2338, in_desc, x2295, filt_desc, x835, conv_desc, algo, ws_data, ws_size, x2336, out_desc, x2335)); }; float* x2341 = (float*)myGpuMalloc(x2334 * sizeof(float)); float* x2342 = (float*)myGpuMalloc(x2332 * sizeof(float)); float* x2343 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2344 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2345 = (float*)myMalloc(1 * sizeof(float));; x2345[0] = 0.0f; float* x2347 = (float*)myMalloc(1 * sizeof(float));; x2347[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2347, x2345, in_desc, x2335, out_desc, x2342, sbmv_desc, x1105, x358, 0.1, x688, x889, 1.0E-5, x2343, x2344)); }; float* x2350 = (float*)myGpuMalloc(x2334 * sizeof(float)); float* x2351 = (float*)myMalloc(1 * sizeof(float));; x2351[0] = 0.0f; float* x2353 = (float*)myMalloc(1 * sizeof(float));; x2353[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2353, x_desc, x2342, x2351, x_desc, x2342)); }; if (x2358) { } else { assert(false && "ERROR not specified"); } float* x2371 = (float*)myGpuMalloc(x2370 * sizeof(float)); float* x2372 = (float*)myMalloc(1 * sizeof(float));; x2372[0] = 0.0f; float* x2374 = 
(float*)myMalloc(1 * sizeof(float));; x2374[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2374, in_desc, x2342, filt_desc, x820, conv_desc, algo, ws_data, ws_size, x2372, out_desc, x2371)); }; float* x2377 = (float*)myGpuMalloc(x2370 * sizeof(float)); float* x2378 = (float*)myGpuMalloc(x2368 * sizeof(float)); float* x2379 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2380 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2381 = (float*)myMalloc(1 * sizeof(float));; x2381[0] = 0.0f; float* x2383 = (float*)myMalloc(1 * sizeof(float));; x2383[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2383, x2381, in_desc, x2371, out_desc, x2378, sbmv_desc, x619, x343, 0.1, x982, x592, 1.0E-5, x2379, x2380)); }; float* x2386 = (float*)myGpuMalloc(x2370 * sizeof(float)); float* x2387 = (float*)myMalloc(1 * sizeof(float));; x2387[0] = 0.0f; float* x2389 = (float*)myMalloc(1 * sizeof(float));; x2389[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2389, x_desc, x2378, x2387, x_desc, x2378)); }; if (x2393) { } else { assert(false && "ERROR not specified"); } float* x2405 = (float*)myGpuMalloc(x2404 * sizeof(float)); float* x2406 = (float*)myMalloc(1 * 
sizeof(float));; x2406[0] = 0.0f; float* x2408 = (float*)myMalloc(1 * sizeof(float));; x2408[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2408, in_desc, x2378, filt_desc, x1102, conv_desc, algo, ws_data, ws_size, x2406, out_desc, x2405)); }; float* x2411 = (float*)myGpuMalloc(x2404 * sizeof(float)); float* x2412 = (float*)myGpuMalloc(x2402 * sizeof(float)); float* x2413 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2414 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2415 = (float*)myMalloc(1 * sizeof(float));; x2415[0] = 0.0f; float* x2417 = (float*)myMalloc(1 * sizeof(float));; x2417[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2417, x2415, in_desc, x2405, out_desc, x2412, sbmv_desc, x349, x646, 0.1, x943, x1096, 1.0E-5, x2413, x2414)); }; float* x2420 = (float*)myGpuMalloc(x2404 * sizeof(float)); if (x2323) { } else { assert(false && "ERROR not specified"); } float* x2431 = (float*)myGpuMalloc(x2430 * sizeof(float)); float* x2432 = (float*)myMalloc(1 * sizeof(float));; x2432[0] = 0.0f; float* x2434 = (float*)myMalloc(1 * sizeof(float));; x2434[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( 
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2434, in_desc, x2295, filt_desc, x520, conv_desc, algo, ws_data, ws_size, x2432, out_desc, x2431)); }; float* x2437 = (float*)myGpuMalloc(x2430 * sizeof(float)); float* x2438 = (float*)myGpuMalloc(x2428 * sizeof(float)); float* x2439 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2440 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2441 = (float*)myMalloc(1 * sizeof(float));; x2441[0] = 0.0f; float* x2443 = (float*)myMalloc(1 * sizeof(float));; x2443[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2443, x2441, in_desc, x2431, out_desc, x2438, sbmv_desc, x382, x955, 0.1, x553, x928, 1.0E-5, x2439, x2440)); }; float* x2446 = (float*)myGpuMalloc(x2430 * sizeof(float)); if (x2450) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2425) x Sym(2425), res: x Const(64) x Const(1024) x Sym(2399) x Sym(2399)"); } float* x2455 = (float*)myMalloc(1 * sizeof(float));; x2455[0] = 1.0f; float* x2457 = (float*)myMalloc(1 * sizeof(float));; x2457[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2455, bias_desc, x2438, x2457, out_desc, x2412)); }; float* x2460 = (float*)myMalloc(1 * sizeof(float));; x2460[0] = 0.0f; float* x2462 = (float*)myMalloc(1 * sizeof(float));; x2462[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 
0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2462, x_desc, x2412, x2460, x_desc, x2412)); }; if (x2466) { } else { assert(false && "ERROR not specified"); } float* x2478 = (float*)myGpuMalloc(x2477 * sizeof(float)); float* x2479 = (float*)myMalloc(1 * sizeof(float));; x2479[0] = 0.0f; float* x2481 = (float*)myMalloc(1 * sizeof(float));; x2481[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2481, in_desc, x2412, filt_desc, x334, conv_desc, algo, ws_data, ws_size, x2479, out_desc, x2478)); }; float* x2484 = (float*)myGpuMalloc(x2477 * sizeof(float)); float* x2485 = (float*)myGpuMalloc(x2475 * sizeof(float)); float* x2486 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2487 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2488 = (float*)myMalloc(1 * sizeof(float));; x2488[0] = 0.0f; float* x2490 = (float*)myMalloc(1 * sizeof(float));; x2490[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2490, x2488, in_desc, x2478, out_desc, x2485, sbmv_desc, x385, x952, 0.1, x1072, x766, 1.0E-5, x2486, x2487)); }; float* x2493 = (float*)myGpuMalloc(x2477 * sizeof(float)); float* x2494 = (float*)myMalloc(1 * sizeof(float));; x2494[0] = 0.0f; float* x2496 = (float*)myMalloc(1 * sizeof(float));; x2496[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, 
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2496, x_desc, x2485, x2494, x_desc, x2485)); }; if (x2501) { } else { assert(false && "ERROR not specified"); } float* x2514 = (float*)myGpuMalloc(x2513 * sizeof(float)); float* x2515 = (float*)myMalloc(1 * sizeof(float));; x2515[0] = 0.0f; float* x2517 = (float*)myMalloc(1 * sizeof(float));; x2517[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2517, in_desc, x2485, filt_desc, x388, conv_desc, algo, ws_data, ws_size, x2515, out_desc, x2514)); }; float* x2520 = (float*)myGpuMalloc(x2513 * sizeof(float)); float* x2521 = (float*)myGpuMalloc(x2511 * sizeof(float)); float* x2522 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2523 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2524 = (float*)myMalloc(1 * sizeof(float));; x2524[0] = 0.0f; float* x2526 = (float*)myMalloc(1 * sizeof(float));; x2526[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2526, x2524, in_desc, x2514, out_desc, x2521, sbmv_desc, x1108, x583, 0.1, x895, x1006, 1.0E-5, x2522, x2523)); }; float* x2529 = (float*)myGpuMalloc(x2513 * sizeof(float)); float* x2530 = (float*)myMalloc(1 * sizeof(float));; x2530[0] = 0.0f; float* x2532 = (float*)myMalloc(1 * sizeof(float));; x2532[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); 
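// Editor's note: the sequence around this point is the bottleneck unit this
// generated network repeats through the whole stage -- a 1x1 convolution that
// reduces the channel count, a 3x3 convolution at the reduced width (pad 1,
// stride 1), and a 1x1 convolution that expands back, each followed by batch
// norm, with ReLU after the first two and again after the skip addition. For
// the 256/1024 blocks in this stage the three filter descriptors are,
// illustratively (K = output channels, C = input channels, R x S = kernel):
//   reduce : cudnnSetFilter4dDescriptor(filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,  256, 1024, 1, 1)
//   spatial: cudnnSetFilter4dDescriptor(filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,  256,  256, 3, 3)
//   expand : cudnnSetFilter4dDescriptor(filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024,  256, 1, 1)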
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2532, x_desc, x2521, x2530, x_desc, x2521)); }; if (x2536) { } else { assert(false && "ERROR not specified"); } float* x2548 = (float*)myGpuMalloc(x2547 * sizeof(float)); float* x2549 = (float*)myMalloc(1 * sizeof(float));; x2549[0] = 0.0f; float* x2551 = (float*)myMalloc(1 * sizeof(float));; x2551[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
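// Editor's note: every convolution queries its workspace requirement and then
// grabs a fresh scratch buffer with myGpuMalloc(ws_size), which is never freed
// here; presumably myGpuMalloc is an arena/bump allocator that is reset
// outside this fragment -- that is an assumption, only the allocation pattern
// itself is visible. The query-then-allocate pair used above is simply:
//
//   size_t ws_size;
//   CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
//       cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
//   void *ws_data = myGpuMalloc(ws_size);   // scratch for this call only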
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2551, in_desc, x2521, filt_desc, x463, conv_desc, algo, ws_data, ws_size, x2549, out_desc, x2548)); }; float* x2554 = (float*)myGpuMalloc(x2547 * sizeof(float)); float* x2555 = (float*)myGpuMalloc(x2545 * sizeof(float)); float* x2556 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2557 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2558 = (float*)myMalloc(1 * sizeof(float));; x2558[0] = 0.0f; float* x2560 = (float*)myMalloc(1 * sizeof(float));; x2560[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2560, x2558, in_desc, x2548, out_desc, x2555, sbmv_desc, x355, x991, 0.1, x841, x724, 1.0E-5, x2556, x2557)); }; float* x2563 = (float*)myGpuMalloc(x2547 * sizeof(float)); if (x2567) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2399) x Sym(2399), res: x Const(64) x Const(1024) x Sym(2542) x Sym(2542)"); } float* x2572 = (float*)myMalloc(1 * sizeof(float));; x2572[0] = 1.0f; float* x2574 = (float*)myMalloc(1 * sizeof(float));; x2574[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2572, bias_desc, x2412, x2574, out_desc, x2555)); }; float* x2577 = (float*)myMalloc(1 * sizeof(float));; x2577[0] = 0.0f; float* x2579 = (float*)myMalloc(1 * sizeof(float));; x2579[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2579, x_desc, x2555, x2577, x_desc, x2555)); }; if (x2583) { } else { assert(false && "ERROR not specified"); } float* x2595 = (float*)myGpuMalloc(x2594 * sizeof(float)); float* x2596 = (float*)myMalloc(1 * sizeof(float));; x2596[0] = 0.0f; float* x2598 = (float*)myMalloc(1 * sizeof(float));; x2598[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); 
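// Editor's note: in cudnnSetFilter4dDescriptor the four trailing integers are
// (K, C, R, S) -- output channels, input channels, kernel height, kernel
// width -- when the layout is CUDNN_TENSOR_NCHW. The call just above therefore
// describes a 1x1 projection from 1024 channels down to 256:
//
//   CUDNN_CALL(cudnnSetFilter4dDescriptor(
//       filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
//       /*K=*/256, /*C=*/1024, /*R=*/1, /*S=*/1));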
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2598, in_desc, x2555, filt_desc, x949, conv_desc, algo, ws_data, ws_size, x2596, out_desc, x2595)); }; float* x2601 = (float*)myGpuMalloc(x2594 * sizeof(float)); float* x2602 = (float*)myGpuMalloc(x2592 * sizeof(float)); float* x2603 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2604 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2605 = (float*)myMalloc(1 * sizeof(float));; x2605[0] = 0.0f; float* x2607 = (float*)myMalloc(1 * sizeof(float));; x2607[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2607, x2605, in_desc, x2595, out_desc, x2602, sbmv_desc, x682, x886, 0.1, x829, x817, 1.0E-5, x2603, x2604)); }; float* x2610 = (float*)myGpuMalloc(x2594 * sizeof(float)); float* x2611 = (float*)myMalloc(1 * sizeof(float));; x2611[0] = 0.0f; float* x2613 = (float*)myMalloc(1 * sizeof(float));; x2613[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2613, x_desc, x2602, x2611, x_desc, x2602)); }; if (x2618) { } else { assert(false && "ERROR not specified"); } float* x2631 = (float*)myGpuMalloc(x2630 * sizeof(float)); float* x2632 = (float*)myMalloc(1 * sizeof(float));; x2632[0] = 0.0f; float* x2634 = (float*)myMalloc(1 * sizeof(float));; x2634[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2634, in_desc, x2602, filt_desc, x337, conv_desc, algo, ws_data, ws_size, x2632, out_desc, x2631)); }; float* x2637 = (float*)myGpuMalloc(x2630 * sizeof(float)); float* x2638 = (float*)myGpuMalloc(x2628 * sizeof(float)); float* x2639 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2640 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2641 = (float*)myMalloc(1 * sizeof(float));; x2641[0] = 0.0f; float* x2643 = (float*)myMalloc(1 * sizeof(float));; x2643[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2643, x2641, in_desc, x2631, out_desc, x2638, sbmv_desc, x979, x871, 0.1, x667, x484, 1.0E-5, x2639, x2640)); }; float* x2646 = (float*)myGpuMalloc(x2630 * sizeof(float)); float* x2647 = (float*)myMalloc(1 * sizeof(float));; x2647[0] = 0.0f; float* x2649 = (float*)myMalloc(1 * sizeof(float));; x2649[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2649, x_desc, x2638, x2647, x_desc, x2638)); }; if (x2653) { } else { assert(false && "ERROR not specified"); } float* x2665 = (float*)myGpuMalloc(x2664 * sizeof(float)); float* x2666 = (float*)myMalloc(1 * sizeof(float));; x2666[0] = 0.0f; float* x2668 = (float*)myMalloc(1 * sizeof(float));; x2668[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
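// Editor's note: the one-element myMalloc'd host floats that precede every
// cuDNN call here (a 0.0f / 1.0f pair per call) are the blending scalars:
// cuDNN computes y = alpha * op(x) + beta * y, so alpha = 1, beta = 0
// overwrites the destination, while the skip-connection cudnnAddTensor calls
// use alpha = beta = 1 to accumulate. Plain stack floats would serve equally
// well, e.g.:
//
//   float one = 1.0f, zero = 0.0f;
//   CUDNN_CALL(cudnnConvolutionForward(cudnnHandle, &one, in_desc, x,
//              filt_desc, w, conv_desc, algo, ws_data, ws_size,
//              &zero, out_desc, y));      // x, w, y: placeholder pointers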
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2668, in_desc, x2638, filt_desc, x643, conv_desc, algo, ws_data, ws_size, x2666, out_desc, x2665)); }; float* x2671 = (float*)myGpuMalloc(x2664 * sizeof(float)); float* x2672 = (float*)myGpuMalloc(x2662 * sizeof(float)); float* x2673 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2674 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2675 = (float*)myMalloc(1 * sizeof(float));; x2675[0] = 0.0f; float* x2677 = (float*)myMalloc(1 * sizeof(float));; x2677[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2677, x2675, in_desc, x2665, out_desc, x2672, sbmv_desc, x1084, x466, 0.1, x715, x859, 1.0E-5, x2673, x2674)); }; float* x2680 = (float*)myGpuMalloc(x2664 * sizeof(float)); if (x2684) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2542) x Sym(2542), res: x Const(64) x Const(1024) x Sym(2659) x Sym(2659)"); } float* x2689 = (float*)myMalloc(1 * sizeof(float));; x2689[0] = 1.0f; float* x2691 = (float*)myMalloc(1 * sizeof(float));; x2691[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2689, bias_desc, x2555, x2691, out_desc, x2672)); }; float* x2694 = (float*)myMalloc(1 * sizeof(float));; x2694[0] = 0.0f; float* x2696 = (float*)myMalloc(1 * sizeof(float));; x2696[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, 
x2659)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2696, x_desc, x2672, x2694, x_desc, x2672)); }; if (x2700) { } else { assert(false && "ERROR not specified"); } float* x2712 = (float*)myGpuMalloc(x2711 * sizeof(float)); float* x2713 = (float*)myMalloc(1 * sizeof(float));; x2713[0] = 0.0f; float* x2715 = (float*)myMalloc(1 * sizeof(float));; x2715[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
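// Editor's note: the symbolic sizes that parameterize these descriptors
// (x2706, x2711, x2776, ...) are the usual convolution output dimensions,
// presumably computed earlier in this generated file as
//
//   out = (in + 2 * pad - kernel) / stride + 1
//
// so the 1x1 / pad 0 / stride 1 and 3x3 / pad 1 / stride 1 convolutions keep
// the spatial size, while the stride-2 convolutions in the downsampling block
// halve it. cuDNN can also report these sizes directly via
// cudnnGetConvolution2dForwardOutputDim.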
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2715, in_desc, x2672, filt_desc, x313, conv_desc, algo, ws_data, ws_size, x2713, out_desc, x2712)); }; float* x2718 = (float*)myGpuMalloc(x2711 * sizeof(float)); float* x2719 = (float*)myGpuMalloc(x2709 * sizeof(float)); float* x2720 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2721 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2722 = (float*)myMalloc(1 * sizeof(float));; x2722[0] = 0.0f; float* x2724 = (float*)myMalloc(1 * sizeof(float));; x2724[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2724, x2722, in_desc, x2712, out_desc, x2719, sbmv_desc, x571, x1018, 0.1, x784, x589, 1.0E-5, x2720, x2721)); }; float* x2727 = (float*)myGpuMalloc(x2711 * sizeof(float)); float* x2728 = (float*)myMalloc(1 * sizeof(float));; x2728[0] = 0.0f; float* x2730 = (float*)myMalloc(1 * sizeof(float));; x2730[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2730, x_desc, x2719, x2728, x_desc, x2719)); }; if (x2735) { } else { assert(false && "ERROR not specified"); } float* x2748 = (float*)myGpuMalloc(x2747 * sizeof(float)); float* x2749 = (float*)myMalloc(1 * sizeof(float));; x2749[0] = 0.0f; float* x2751 = (float*)myMalloc(1 * sizeof(float));; x2751[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
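// Editor's note: cudnnGetConvolutionForwardAlgorithm with
// CUDNN_CONVOLUTION_FWD_PREFER_FASTEST and a memory limit of 0 asks cuDNN's
// heuristic for the fastest algorithm with no workspace cap (the limit
// argument only applies to CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT).
// This is the cuDNN 7-era entry point; newer cuDNN releases drop it in favor
// of cudnnGetConvolutionForwardAlgorithm_v7 / cudnnFindConvolutionForwardAlgorithm,
// so this file presumably targets an older cuDNN.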
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2751, in_desc, x2719, filt_desc, x1042, conv_desc, algo, ws_data, ws_size, x2749, out_desc, x2748)); }; float* x2754 = (float*)myGpuMalloc(x2747 * sizeof(float)); float* x2755 = (float*)myGpuMalloc(x2745 * sizeof(float)); float* x2756 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2757 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2758 = (float*)myMalloc(1 * sizeof(float));; x2758[0] = 0.0f; float* x2760 = (float*)myMalloc(1 * sizeof(float));; x2760[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2760, x2758, in_desc, x2748, out_desc, x2755, sbmv_desc, x517, x703, 0.1, x853, x985, 1.0E-5, x2756, x2757)); }; float* x2763 = (float*)myGpuMalloc(x2747 * sizeof(float)); float* x2764 = (float*)myMalloc(1 * sizeof(float));; x2764[0] = 0.0f; float* x2766 = (float*)myMalloc(1 * sizeof(float));; x2766[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2766, x_desc, x2755, x2764, x_desc, x2755)); }; if (x2770) { } else { assert(false && "ERROR not specified"); } float* x2782 = (float*)myGpuMalloc(x2781 * sizeof(float)); float* x2783 = (float*)myMalloc(1 * sizeof(float));; x2783[0] = 0.0f; float* x2785 = (float*)myMalloc(1 * sizeof(float));; x2785[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
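// Editor's note: each forward output is shadowed by an equally sized
// myGpuMalloc buffer that the forward pass never writes (e.g. x2754 next to
// x2748 above, and similarly throughout). These are presumably gradient
// buffers for a backward pass generated elsewhere in this file; that reading
// is an assumption -- only the allocations themselves are visible here.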
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2785, in_desc, x2755, filt_desc, x562, conv_desc, algo, ws_data, ws_size, x2783, out_desc, x2782)); }; float* x2788 = (float*)myGpuMalloc(x2781 * sizeof(float)); float* x2789 = (float*)myGpuMalloc(x2779 * sizeof(float)); float* x2790 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2791 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2792 = (float*)myMalloc(1 * sizeof(float));; x2792[0] = 0.0f; float* x2794 = (float*)myMalloc(1 * sizeof(float));; x2794[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2794, x2792, in_desc, x2782, out_desc, x2789, sbmv_desc, x1009, x733, 0.1, x988, x778, 1.0E-5, x2790, x2791)); }; float* x2797 = (float*)myGpuMalloc(x2781 * sizeof(float)); if (x2801) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2659) x Sym(2659), res: x Const(64) x Const(1024) x Sym(2776) x Sym(2776)"); } float* x2806 = (float*)myMalloc(1 * sizeof(float));; x2806[0] = 1.0f; float* x2808 = (float*)myMalloc(1 * sizeof(float));; x2808[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2806, bias_desc, x2672, x2808, out_desc, x2789)); }; float* x2811 = (float*)myMalloc(1 * sizeof(float));; x2811[0] = 0.0f; float* x2813 = (float*)myMalloc(1 * sizeof(float));; x2813[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2813, x_desc, x2789, x2811, x_desc, x2789)); }; if (x2817) { } else { assert(false && "ERROR not specified"); } float* x2829 = (float*)myGpuMalloc(x2828 * sizeof(float)); float* x2830 = (float*)myMalloc(1 * sizeof(float));; x2830[0] = 0.0f; float* x2832 = (float*)myMalloc(1 * sizeof(float));; x2832[0] = 1.0f; { 
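// Editor's note: every convolution descriptor also calls
// cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH), which permits
// Tensor Core kernels where the hardware and data type allow them. With the
// CUDNN_DATA_FLOAT tensors used here this is a hint rather than a guarantee --
// cuDNN may still fall back to ordinary FP32 kernels -- and it does not change
// the FP32 math of the surrounding code.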
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2832, in_desc, x2789, filt_desc, x361, conv_desc, algo, ws_data, ws_size, x2830, out_desc, x2829)); }; float* x2835 = (float*)myGpuMalloc(x2828 * sizeof(float)); float* x2836 = (float*)myGpuMalloc(x2826 * sizeof(float)); float* x2837 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2838 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2839 = (float*)myMalloc(1 * sizeof(float));; x2839[0] = 0.0f; float* x2841 = (float*)myMalloc(1 * sizeof(float));; x2841[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2841, x2839, in_desc, x2829, out_desc, x2836, sbmv_desc, x526, x850, 0.1, x1057, x502, 1.0E-5, x2837, x2838)); }; float* x2844 = (float*)myGpuMalloc(x2828 * sizeof(float)); float* x2845 = (float*)myMalloc(1 * sizeof(float));; x2845[0] = 0.0f; float* x2847 = (float*)myMalloc(1 * sizeof(float));; x2847[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2847, x_desc, x2836, x2845, x_desc, x2836)); }; if (x2852) { } else { assert(false && "ERROR not specified"); } float* x2865 = (float*)myGpuMalloc(x2864 * sizeof(float)); float* x2866 = (float*)myMalloc(1 * sizeof(float));; x2866[0] = 0.0f; float* x2868 = 
(float*)myMalloc(1 * sizeof(float));; x2868[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2868, in_desc, x2836, filt_desc, x1081, conv_desc, algo, ws_data, ws_size, x2866, out_desc, x2865)); }; float* x2871 = (float*)myGpuMalloc(x2864 * sizeof(float)); float* x2872 = (float*)myGpuMalloc(x2862 * sizeof(float)); float* x2873 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2874 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2875 = (float*)myMalloc(1 * sizeof(float));; x2875[0] = 0.0f; float* x2877 = (float*)myMalloc(1 * sizeof(float));; x2877[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2877, x2875, in_desc, x2865, out_desc, x2872, sbmv_desc, x799, x622, 0.1, x1045, x607, 1.0E-5, x2873, x2874)); }; float* x2880 = (float*)myGpuMalloc(x2864 * sizeof(float)); float* x2881 = (float*)myMalloc(1 * sizeof(float));; x2881[0] = 0.0f; float* x2883 = (float*)myMalloc(1 * sizeof(float));; x2883[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2883, x_desc, x2872, x2881, x_desc, x2872)); }; if (x2887) { } else { assert(false && "ERROR not specified"); } float* x2899 = (float*)myGpuMalloc(x2898 * sizeof(float)); float* x2900 = (float*)myMalloc(1 * 
sizeof(float));; x2900[0] = 0.0f; float* x2902 = (float*)myMalloc(1 * sizeof(float));; x2902[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2902, in_desc, x2872, filt_desc, x958, conv_desc, algo, ws_data, ws_size, x2900, out_desc, x2899)); }; float* x2905 = (float*)myGpuMalloc(x2898 * sizeof(float)); float* x2906 = (float*)myGpuMalloc(x2896 * sizeof(float)); float* x2907 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2908 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2909 = (float*)myMalloc(1 * sizeof(float));; x2909[0] = 0.0f; float* x2911 = (float*)myMalloc(1 * sizeof(float));; x2911[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2911, x2909, in_desc, x2899, out_desc, x2906, sbmv_desc, x472, x655, 0.1, x922, x1111, 1.0E-5, x2907, x2908)); }; float* x2914 = (float*)myGpuMalloc(x2898 * sizeof(float)); if (x2918) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2776) x Sym(2776), res: x Const(64) x Const(1024) x Sym(2893) x Sym(2893)"); } float* x2923 = (float*)myMalloc(1 * sizeof(float));; x2923[0] = 1.0f; float* x2925 = (float*)myMalloc(1 * sizeof(float));; x2925[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); CUDNN_CALL(cudnnAddTensor( 
cudnnHandle, x2923, bias_desc, x2789, x2925, out_desc, x2906)); }; float* x2928 = (float*)myMalloc(1 * sizeof(float));; x2928[0] = 0.0f; float* x2930 = (float*)myMalloc(1 * sizeof(float));; x2930[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2930, x_desc, x2906, x2928, x_desc, x2906)); }; if (x2934) { } else { assert(false && "ERROR not specified"); } float* x2946 = (float*)myGpuMalloc(x2945 * sizeof(float)); float* x2947 = (float*)myMalloc(1 * sizeof(float));; x2947[0] = 0.0f; float* x2949 = (float*)myMalloc(1 * sizeof(float));; x2949[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
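// Run the 1x1 channel-reduction convolution (1024 -> 256) that opens the next bottleneck unit.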
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2949, in_desc, x2906, filt_desc, x748, conv_desc, algo, ws_data, ws_size, x2947, out_desc, x2946)); }; float* x2952 = (float*)myGpuMalloc(x2945 * sizeof(float)); float* x2953 = (float*)myGpuMalloc(x2943 * sizeof(float)); float* x2954 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2955 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2956 = (float*)myMalloc(1 * sizeof(float));; x2956[0] = 0.0f; float* x2958 = (float*)myMalloc(1 * sizeof(float));; x2958[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2958, x2956, in_desc, x2946, out_desc, x2953, sbmv_desc, x550, x1054, 0.1, x535, x823, 1.0E-5, x2954, x2955)); }; float* x2961 = (float*)myGpuMalloc(x2945 * sizeof(float)); float* x2962 = (float*)myMalloc(1 * sizeof(float));; x2962[0] = 0.0f; float* x2964 = (float*)myMalloc(1 * sizeof(float));; x2964[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2964, x_desc, x2953, x2962, x_desc, x2953)); }; if (x2969) { } else { assert(false && "ERROR not specified"); } float* x2982 = (float*)myGpuMalloc(x2981 * sizeof(float)); float* x2983 = (float*)myMalloc(1 * sizeof(float));; x2983[0] = 0.0f; float* x2985 = (float*)myMalloc(1 * sizeof(float));; x2985[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
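// Query the scratch space the selected algorithm needs before launching this padded 3x3 convolution.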
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2985, in_desc, x2953, filt_desc, x973, conv_desc, algo, ws_data, ws_size, x2983, out_desc, x2982)); }; float* x2988 = (float*)myGpuMalloc(x2981 * sizeof(float)); float* x2989 = (float*)myGpuMalloc(x2979 * sizeof(float)); float* x2990 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2991 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2992 = (float*)myMalloc(1 * sizeof(float));; x2992[0] = 0.0f; float* x2994 = (float*)myMalloc(1 * sizeof(float));; x2994[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2994, x2992, in_desc, x2982, out_desc, x2989, sbmv_desc, x718, x862, 0.1, x505, x1015, 1.0E-5, x2990, x2991)); }; float* x2997 = (float*)myGpuMalloc(x2981 * sizeof(float)); float* x2998 = (float*)myMalloc(1 * sizeof(float));; x2998[0] = 0.0f; float* x3000 = (float*)myMalloc(1 * sizeof(float));; x3000[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3000, x_desc, x2989, x2998, x_desc, x2989)); }; if (x3004) { } else { assert(false && "ERROR not specified"); } float* x3016 = (float*)myGpuMalloc(x3015 * sizeof(float)); float* x3017 = (float*)myMalloc(1 * sizeof(float));; x3017[0] = 0.0f; float* x3019 = (float*)myMalloc(1 * sizeof(float));; x3019[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
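// Let cuDNN pick the fastest forward algorithm (CUDNN_CONVOLUTION_FWD_PREFER_FASTEST) for the 1x1 expansion convolution (256 -> 1024 channels).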
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3019, in_desc, x2989, filt_desc, x586, conv_desc, algo, ws_data, ws_size, x3017, out_desc, x3016)); }; float* x3022 = (float*)myGpuMalloc(x3015 * sizeof(float)); float* x3023 = (float*)myGpuMalloc(x3013 * sizeof(float)); float* x3024 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x3025 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x3026 = (float*)myMalloc(1 * sizeof(float));; x3026[0] = 0.0f; float* x3028 = (float*)myMalloc(1 * sizeof(float));; x3028[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3028, x3026, in_desc, x3016, out_desc, x3023, sbmv_desc, x1039, x574, 0.1, x661, x844, 1.0E-5, x3024, x3025)); }; float* x3031 = (float*)myGpuMalloc(x3015 * sizeof(float)); if (x3035) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2893) x Sym(2893), res: x Const(64) x Const(1024) x Sym(3010) x Sym(3010)"); } float* x3040 = (float*)myMalloc(1 * sizeof(float));; x3040[0] = 1.0f; float* x3042 = (float*)myMalloc(1 * sizeof(float));; x3042[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3040, bias_desc, x2906, x3042, out_desc, x3023)); }; float* x3045 = (float*)myMalloc(1 * sizeof(float));; x3045[0] = 0.0f; float* x3047 = (float*)myMalloc(1 * sizeof(float));; x3047[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3047, x_desc, x3023, x3045, x_desc, x3023)); }; if (x3051) { } else { assert(false && "ERROR not specified"); } float* x3063 = (float*)myGpuMalloc(x3062 * sizeof(float)); float* x3064 = (float*)myMalloc(1 * sizeof(float));; x3064[0] = 0.0f; float* x3066 = (float*)myMalloc(1 * sizeof(float));; x3066[0] = 1.0f; { 
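// Start of the 2048-channel stage: this block is the 1x1 convolution reducing 1024 -> 512 channels for the first bottleneck unit; the stride-2 3x3 convolution and the stride-2 projection shortcut appear further below.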
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3066, in_desc, x3023, filt_desc, x712, conv_desc, algo, ws_data, ws_size, x3064, out_desc, x3063)); }; float* x3069 = (float*)myGpuMalloc(x3062 * sizeof(float)); float* x3070 = (float*)myGpuMalloc(x3060 * sizeof(float)); float* x3071 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3072 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3073 = (float*)myMalloc(1 * sizeof(float));; x3073[0] = 0.0f; float* x3075 = (float*)myMalloc(1 * sizeof(float));; x3075[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3075, x3073, in_desc, x3063, out_desc, x3070, sbmv_desc, x898, x967, 0.1, x496, x658, 1.0E-5, x3071, x3072)); }; float* x3078 = (float*)myGpuMalloc(x3062 * sizeof(float)); float* x3079 = (float*)myMalloc(1 * sizeof(float));; x3079[0] = 0.0f; float* x3081 = (float*)myMalloc(1 * sizeof(float));; x3081[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3081, x_desc, x3070, x3079, x_desc, x3070)); }; if (x3086) { } else { assert(false && "ERROR not specified"); } float* x3099 = (float*)myGpuMalloc(x3098 * sizeof(float)); float* x3100 = (float*)myMalloc(1 * sizeof(float));; x3100[0] = 0.0f; float* x3102 = 
(float*)myMalloc(1 * sizeof(float));; x3102[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3102, in_desc, x3070, filt_desc, x397, conv_desc, algo, ws_data, ws_size, x3100, out_desc, x3099)); }; float* x3105 = (float*)myGpuMalloc(x3098 * sizeof(float)); float* x3106 = (float*)myGpuMalloc(x3096 * sizeof(float)); float* x3107 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3108 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3109 = (float*)myMalloc(1 * sizeof(float));; x3109[0] = 0.0f; float* x3111 = (float*)myMalloc(1 * sizeof(float));; x3111[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3111, x3109, in_desc, x3099, out_desc, x3106, sbmv_desc, x910, x772, 0.1, x634, x445, 1.0E-5, x3107, x3108)); }; float* x3114 = (float*)myGpuMalloc(x3098 * sizeof(float)); float* x3115 = (float*)myMalloc(1 * sizeof(float));; x3115[0] = 0.0f; float* x3117 = (float*)myMalloc(1 * sizeof(float));; x3117[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3117, x_desc, x3106, x3115, x_desc, x3106)); }; if (x3121) { } else { assert(false && "ERROR not specified"); } float* x3133 = (float*)myGpuMalloc(x3132 * sizeof(float)); float* x3134 = (float*)myMalloc(1 * 
sizeof(float));; x3134[0] = 0.0f; float* x3136 = (float*)myMalloc(1 * sizeof(float));; x3136[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3136, in_desc, x3106, filt_desc, x931, conv_desc, algo, ws_data, ws_size, x3134, out_desc, x3133)); }; float* x3139 = (float*)myGpuMalloc(x3132 * sizeof(float)); float* x3140 = (float*)myGpuMalloc(x3130 * sizeof(float)); float* x3141 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3142 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3143 = (float*)myMalloc(1 * sizeof(float));; x3143[0] = 0.0f; float* x3145 = (float*)myMalloc(1 * sizeof(float));; x3145[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3145, x3143, in_desc, x3133, out_desc, x3140, sbmv_desc, x1012, x481, 0.1, x640, x874, 1.0E-5, x3141, x3142)); }; float* x3148 = (float*)myGpuMalloc(x3132 * sizeof(float)); if (x3051) { } else { assert(false && "ERROR not specified"); } float* x3159 = (float*)myGpuMalloc(x3158 * sizeof(float)); float* x3160 = (float*)myMalloc(1 * sizeof(float));; x3160[0] = 0.0f; float* x3162 = (float*)myMalloc(1 * sizeof(float));; x3162[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( 
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3162, in_desc, x3023, filt_desc, x937, conv_desc, algo, ws_data, ws_size, x3160, out_desc, x3159)); }; float* x3165 = (float*)myGpuMalloc(x3158 * sizeof(float)); float* x3166 = (float*)myGpuMalloc(x3156 * sizeof(float)); float* x3167 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3168 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3169 = (float*)myMalloc(1 * sizeof(float));; x3169[0] = 0.0f; float* x3171 = (float*)myMalloc(1 * sizeof(float));; x3171[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3171, x3169, in_desc, x3159, out_desc, x3166, sbmv_desc, x814, x616, 0.1, x487, x670, 1.0E-5, x3167, x3168)); }; float* x3174 = (float*)myGpuMalloc(x3158 * sizeof(float)); if (x3178) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3153) x Sym(3153), res: x Const(64) x Const(2048) x Sym(3127) x Sym(3127)"); } float* x3183 = (float*)myMalloc(1 * sizeof(float));; x3183[0] = 1.0f; float* x3185 = (float*)myMalloc(1 * sizeof(float));; x3185[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3183, bias_desc, x3166, x3185, out_desc, x3140)); }; float* x3188 = (float*)myMalloc(1 * sizeof(float));; x3188[0] = 0.0f; float* x3190 = (float*)myMalloc(1 * sizeof(float));; x3190[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 
0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3190, x_desc, x3140, x3188, x_desc, x3140)); }; if (x3194) { } else { assert(false && "ERROR not specified"); } float* x3206 = (float*)myGpuMalloc(x3205 * sizeof(float)); float* x3207 = (float*)myMalloc(1 * sizeof(float));; x3207[0] = 0.0f; float* x3209 = (float*)myMalloc(1 * sizeof(float));; x3209[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3209, in_desc, x3140, filt_desc, x940, conv_desc, algo, ws_data, ws_size, x3207, out_desc, x3206)); }; float* x3212 = (float*)myGpuMalloc(x3205 * sizeof(float)); float* x3213 = (float*)myGpuMalloc(x3203 * sizeof(float)); float* x3214 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3215 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3216 = (float*)myMalloc(1 * sizeof(float));; x3216[0] = 0.0f; float* x3218 = (float*)myMalloc(1 * sizeof(float));; x3218[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3218, x3216, in_desc, x3206, out_desc, x3213, sbmv_desc, x433, x706, 0.1, x757, x490, 1.0E-5, x3214, x3215)); }; float* x3221 = (float*)myGpuMalloc(x3205 * sizeof(float)); float* x3222 = (float*)myMalloc(1 * sizeof(float));; x3222[0] = 0.0f; float* x3224 = (float*)myMalloc(1 * sizeof(float));; x3224[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, 
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3224, x_desc, x3213, x3222, x_desc, x3213)); }; if (x3229) { } else { assert(false && "ERROR not specified"); } float* x3242 = (float*)myGpuMalloc(x3241 * sizeof(float)); float* x3243 = (float*)myMalloc(1 * sizeof(float));; x3243[0] = 0.0f; float* x3245 = (float*)myMalloc(1 * sizeof(float));; x3245[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3245, in_desc, x3213, filt_desc, x760, conv_desc, algo, ws_data, ws_size, x3243, out_desc, x3242)); }; float* x3248 = (float*)myGpuMalloc(x3241 * sizeof(float)); float* x3249 = (float*)myGpuMalloc(x3239 * sizeof(float)); float* x3250 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3251 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3252 = (float*)myMalloc(1 * sizeof(float));; x3252[0] = 0.0f; float* x3254 = (float*)myMalloc(1 * sizeof(float));; x3254[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3254, x3252, in_desc, x3242, out_desc, x3249, sbmv_desc, x775, x493, 0.1, x709, x880, 1.0E-5, x3250, x3251)); }; float* x3257 = (float*)myGpuMalloc(x3241 * sizeof(float)); float* x3258 = (float*)myMalloc(1 * sizeof(float));; x3258[0] = 0.0f; float* x3260 = (float*)myMalloc(1 * sizeof(float));; x3260[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); 
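// In-place ReLU on the batch-normalised output of this unit's 3x3 convolution.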
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3260, x_desc, x3249, x3258, x_desc, x3249)); }; if (x3264) { } else { assert(false && "ERROR not specified"); } float* x3276 = (float*)myGpuMalloc(x3275 * sizeof(float)); float* x3277 = (float*)myMalloc(1 * sizeof(float));; x3277[0] = 0.0f; float* x3279 = (float*)myMalloc(1 * sizeof(float));; x3279[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
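// Run the 1x1 expansion convolution (512 -> 2048 channels) that closes this bottleneck unit.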
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3279, in_desc, x3249, filt_desc, x436, conv_desc, algo, ws_data, ws_size, x3277, out_desc, x3276)); }; float* x3282 = (float*)myGpuMalloc(x3275 * sizeof(float)); float* x3283 = (float*)myGpuMalloc(x3273 * sizeof(float)); float* x3284 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3285 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3286 = (float*)myMalloc(1 * sizeof(float));; x3286[0] = 0.0f; float* x3288 = (float*)myMalloc(1 * sizeof(float));; x3288[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3288, x3286, in_desc, x3276, out_desc, x3283, sbmv_desc, x577, x727, 0.1, x499, x1030, 1.0E-5, x3284, x3285)); }; float* x3291 = (float*)myGpuMalloc(x3275 * sizeof(float)); if (x3295) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3127) x Sym(3127), res: x Const(64) x Const(2048) x Sym(3270) x Sym(3270)"); } float* x3300 = (float*)myMalloc(1 * sizeof(float));; x3300[0] = 1.0f; float* x3302 = (float*)myMalloc(1 * sizeof(float));; x3302[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3300, bias_desc, x3140, x3302, out_desc, x3283)); }; float* x3305 = (float*)myMalloc(1 * sizeof(float));; x3305[0] = 0.0f; float* x3307 = (float*)myMalloc(1 * sizeof(float));; x3307[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3307, x_desc, x3283, x3305, x_desc, x3283)); }; if (x3311) { } else { assert(false && "ERROR not specified"); } float* x3323 = (float*)myGpuMalloc(x3322 * sizeof(float)); float* x3324 = (float*)myMalloc(1 * sizeof(float));; x3324[0] = 0.0f; float* x3326 = (float*)myMalloc(1 * sizeof(float));; x3326[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); 
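// 1x1 filter bank (512 x 2048 x 1 x 1): channel reduction 2048 -> 512 for the next bottleneck unit.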
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3326, in_desc, x3283, filt_desc, x1090, conv_desc, algo, ws_data, ws_size, x3324, out_desc, x3323)); }; float* x3329 = (float*)myGpuMalloc(x3322 * sizeof(float)); float* x3330 = (float*)myGpuMalloc(x3320 * sizeof(float)); float* x3331 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3332 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3333 = (float*)myMalloc(1 * sizeof(float));; x3333[0] = 0.0f; float* x3335 = (float*)myMalloc(1 * sizeof(float));; x3335[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3335, x3333, in_desc, x3323, out_desc, x3330, sbmv_desc, x340, x529, 0.1, x934, x1060, 1.0E-5, x3331, x3332)); }; float* x3338 = (float*)myGpuMalloc(x3322 * sizeof(float)); float* x3339 = (float*)myMalloc(1 * sizeof(float));; x3339[0] = 0.0f; float* x3341 = (float*)myMalloc(1 * sizeof(float));; x3341[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3341, x_desc, x3330, x3339, x_desc, x3330)); }; if (x3346) { } else { assert(false && "ERROR not specified"); } float* x3359 = (float*)myGpuMalloc(x3358 * sizeof(float)); float* x3360 = (float*)myMalloc(1 * sizeof(float));; x3360[0] = 0.0f; float* x3362 = (float*)myMalloc(1 * sizeof(float));; x3362[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3362, in_desc, x3330, filt_desc, x379, conv_desc, algo, ws_data, ws_size, x3360, out_desc, x3359)); }; float* x3365 = (float*)myGpuMalloc(x3358 * sizeof(float)); float* x3366 = (float*)myGpuMalloc(x3356 * sizeof(float)); float* x3367 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3368 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3369 = (float*)myMalloc(1 * sizeof(float));; x3369[0] = 0.0f; float* x3371 = (float*)myMalloc(1 * sizeof(float));; x3371[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3371, x3369, in_desc, x3359, out_desc, x3366, sbmv_desc, x877, x802, 0.1, x331, x901, 1.0E-5, x3367, x3368)); }; float* x3374 = (float*)myGpuMalloc(x3358 * sizeof(float)); float* x3375 = (float*)myMalloc(1 * sizeof(float));; x3375[0] = 0.0f; float* x3377 = (float*)myMalloc(1 * sizeof(float));; x3377[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3377, x_desc, x3366, x3375, x_desc, x3366)); }; if (x3381) { } else { assert(false && "ERROR not specified"); } float* x3393 = (float*)myGpuMalloc(x3392 * sizeof(float)); float* x3394 = (float*)myMalloc(1 * sizeof(float));; x3394[0] = 0.0f; float* x3396 = (float*)myMalloc(1 * sizeof(float));; x3396[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
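// Final 1x1 expansion (512 -> 2048) of this bottleneck unit; average pooling and the fully connected (gemm) classifier follow.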
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3396, in_desc, x3366, filt_desc, x394, conv_desc, algo, ws_data, ws_size, x3394, out_desc, x3393)); }; float* x3399 = (float*)myGpuMalloc(x3392 * sizeof(float)); float* x3400 = (float*)myGpuMalloc(x3390 * sizeof(float)); float* x3401 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3402 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3403 = (float*)myMalloc(1 * sizeof(float));; x3403[0] = 0.0f; float* x3405 = (float*)myMalloc(1 * sizeof(float));; x3405[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3405, x3403, in_desc, x3393, out_desc, x3400, sbmv_desc, x604, x838, 0.1, x1075, x664, 1.0E-5, x3401, x3402)); }; float* x3408 = (float*)myGpuMalloc(x3392 * sizeof(float)); if (x3412) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3270) x Sym(3270), res: x Const(64) x Const(2048) x Sym(3387) x Sym(3387)"); } float* x3417 = (float*)myMalloc(1 * sizeof(float));; x3417[0] = 1.0f; float* x3419 = (float*)myMalloc(1 * sizeof(float));; x3419[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3417, bias_desc, x3283, x3419, out_desc, x3400)); }; float* x3422 = (float*)myMalloc(1 * sizeof(float));; x3422[0] = 0.0f; float* x3424 = (float*)myMalloc(1 * sizeof(float));; x3424[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, 
x3387));

cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
                                        /*mode=*/ CUDNN_ACTIVATION_RELU,
                                        /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationForward(
    cudnnHandle, act_desc,
    x3424, x_desc, x3400, x3422, x_desc, x3400));
};
if (x3428) {
} else {
assert(false && "Image too small for averagePool_batch: x Const(64) x Const(2048) x Sym(3387) x Sym(3387)|(2,2)");
}
float* x3433 = (float*)myMalloc(1 * sizeof(float));
x3433[0] = 0.0f;
float* x3435 = (float*)myMalloc(1 * sizeof(float));
x3435[0] = 1.0f;
float* x3445 = (float*)myGpuMalloc(x3444 * sizeof(float));
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 2048, x3387, x3387));

cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 2048, x3439, x3439));

cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
    poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN,
    2, 2, 0, 0, 1, 1));
CUDNN_CALL(cudnnPoolingForward(
    cudnnHandle, poolingDesc,
    x3435, in_desc, x3400, x3433, out_desc, x3445));
};
float* x3447 = (float*)myGpuMalloc(x3444 * sizeof(float));
// reshape the pooled activation to (64, -1) before the fully connected layer
int32_t x3448 = 0; int32_t x3449 = 1;
x3449 *= 64; x3448 += 1;
int32_t x3452 = x3448; bool x3453 = x3452 >= 2;
if (x3453) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3459 = x3452 == 0;
if (x3459) { int32_t x3460 = x3449; bool x3461 = x3460 == x3442; if (x3461) { } else { assert(false && "must same size!!"); } } else { }
int32_t x3468 = x3449;
// forward of gemm
// gemm: List(Const(64), Sym(3469)), Vector(Const(10), Const(2048))
float* x3473 = (float*)myGpuMalloc(640 * sizeof(float));
float* x3474 = (float*)myMalloc(1 * sizeof(float));
x3474[0] = 0.0f;
float* x3476 = (float*)myMalloc(1 * sizeof(float));
x3476[0] = 1.0f;
CUBLAS_CALL(hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N,
                         10, 64, 2048, x3476, x976, 2048, x3445, 2048, x3474, x3473, 10));
float* x3479 = (float*)myGpuMalloc(640 * sizeof(float));
float* x3480 = (float*)myMalloc(1 * sizeof(float));
x3480[0] = 1.0f;
float* x3482 = (float*)myMalloc(1 * sizeof(float));
x3482[0] = 1.0f;
{
cudnnTensorDescriptor_t bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    1, 10, 1, 1));

cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 10, 1, 1));

CUDNN_CALL(cudnnAddTensor(
    cudnnHandle, x3480, bias_desc, x439, x3482, out_desc, x3473));
};
int32_t x3485 = 0; int32_t x3486 = 1;
x3486 *= 64; x3486 *= 10; x3486 *= 1; x3486 *= 1;
int32_t x3491 = x3485; bool x3492 = x3491 >= 2;
if (x3492) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3497 = x3491 == 0;
if (x3497) { int32_t x3498 = x3486; bool x3499 = x3498 == 640; if (x3499) { } else { assert(false && "must same size!!"); } } else { }
float* x3506 = (float*)myMalloc(1 * sizeof(float));
x3506[0] = 0.0f;
float* x3508 = (float*)myMalloc(1 * sizeof(float));
x3508[0] = 1.0f;
float* x3510 = (float*)myGpuMalloc(640 * sizeof(float));
{
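// Classifier head: the gemm above multiplied the flattened pooled features (x3445) by what
// appears to be the 10 x 2048 fully connected weight matrix (x976) and added the bias (x439),
// producing the 64 x 10 logits in x3473. This block converts the logits to log-probabilities
// (cudnnSoftmaxForward with CUDNN_SOFTMAX_LOG) in x3510.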
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 10, 1, 1));

CUDNN_CALL(cudnnSoftmaxForward(
    cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL,
    x3508, x_desc, x3473, x3506, x_desc, x3510));
};
int32_t x3512 = 0; int32_t x3513 = 1;
x3513 *= 64; x3513 *= 10;
int32_t x3516 = x3512; bool x3517 = x3516 >= 2;
if (x3517) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3522 = x3516 == 0;
if (x3522) { int32_t x3523 = x3513; bool x3524 = x3523 == 640; if (x3524) { } else { assert(false && "must same size!!"); } } else { }
float* x3531 = (float*)myGpuMalloc(640 * sizeof(float));
float* x3532 = (float*)myGpuMalloc(64 * sizeof(float));
hipLaunchKernelGGL((nllLoss), dim3(64), dim3(1), 0, 0, x3510, 10, x3532, x1405);
float* x3534 = (float*)myGpuMalloc(64 * sizeof(float));
int32_t x3535 = 0; int32_t x3536 = 1;
x3536 *= 64; x3536 *= 1; x3536 *= 1; x3536 *= 1;
int32_t x3541 = x3535; bool x3542 = x3541 >= 2;
if (x3542) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3547 = x3541 == 0;
if (x3547) { int32_t x3548 = x3536; bool x3549 = x3548 == 64; if (x3549) { } else { assert(false && "must same size!!"); } } else { }
float* x3556 = (float*)myGpuMalloc(1 * sizeof(float));
float* x3557 = (float*)myMalloc(1 * sizeof(float));
x3557[0] = 0.0f;
float* x3559 = (float*)myMalloc(1 * sizeof(float));
x3559[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 1, 1, 1));

cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    1, 1, 1, 1));

cudnnReduceTensorDescriptor_t reduce_desc;
CUDNN_CALL(cudnnCreateReduceTensorDescriptor(&reduce_desc));
CUDNN_CALL(cudnnSetReduceTensorDescriptor(
    reduce_desc, CUDNN_REDUCE_TENSOR_AVG, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN,
    CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES));

void *indices = nullptr; // Don't store indices.

// Workspace.
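// The nllLoss kernel above gathered one loss value per example into x3532 (x1405 presumably
// holds the target labels); the reduction below averages those 64 values
// (CUDNN_REDUCE_TENSOR_AVG) into the scalar batch loss x3556.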
size_t ws_size;
CUDNN_CALL(cudnnGetReductionWorkspaceSize(
    cudnnHandle, reduce_desc, x_desc, out_desc, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnReduceTensor(
    cudnnHandle, reduce_desc, indices, 0, ws_data, ws_size,
    x3559, x_desc, x3532, x3557, out_desc, x3556));
};
int32_t x3562 = 0; int32_t x3563 = 1;
x3563 *= 1;
int32_t x3565 = x3562; bool x3566 = x3565 >= 2;
if (x3566) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3571 = x3565 == 0;
if (x3571) { int32_t x3572 = x3563; bool x3573 = x3572 == 1; if (x3573) { } else { assert(false && "must same size!!"); } } else { }
float* x3580 = (float*)myGpuMalloc(1 * sizeof(float));
// make sure the size of loss is 1
hipLaunchKernelGGL((arrayFill), dim3(28), dim3(512), 0, 0, x3580, 1.0f, 1);
// backend is lantern.TensorDslCudnn$BackendCudnn@22cd45ab
CUDA_CALL(hipMemcpy(x1410, x3556, 1 * sizeof(float), hipMemcpyDeviceToDevice));
// 'mean' gradient
// backprop for mean op
// Note: this reads x3580 (filled on the GPU by arrayFill above) directly from host code, which
// assumes myGpuMalloc hands out host-accessible (unified/managed) memory.
float x3587 = x3580[0];
float x3588 = x3587 / 64.0f;
// the gradient of the batch-mean loss is distributed equally (1/64) to every per-example loss
hipLaunchKernelGGL((addScalar), dim3(28), dim3(512), 0, 0, x3534, x3534, x3588, 64);
// 'nllLossB' gradient.
hipLaunchKernelGGL((nllLoss_grad), dim3(64), dim3(1), 0, 0, 10, x3534, x1405, x3531);
// (repeated shape checks emitted by the code generator for the 64 x 10 gradient tensor)
int32_t x3592 = 0; int32_t x3593 = 1;
x3593 *= 64; x3593 *= 10; x3593 *= 1; x3593 *= 1;
int32_t x3598 = x3592; bool x3599 = x3598 >= 2;
if (x3599) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3604 = x3598 == 0;
if (x3604) { int32_t x3605 = x3593; bool x3606 = x3605 == 640; if (x3606) { } else { assert(false && "must same size!!"); } } else { }
int32_t x3613 = 0; int32_t x3614 = 1;
x3614 *= 64; x3614 *= 10; x3614 *= 1; x3614 *= 1;
int32_t x3619 = x3613; bool x3620 = x3619 >= 2;
if (x3620) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3625 = x3619 == 0;
if (x3625) { int32_t x3626 = x3614; bool x3627 = x3626 == 640; if (x3627) { } else { assert(false && "must same size!!"); } } else { }
int32_t x3634 = 0; int32_t x3635 = 1;
x3635 *= 64; x3635 *= 10; x3635 *= 1; x3635 *= 1;
int32_t x3640 = x3634; bool x3641 = x3640 >= 2;
if (x3641) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3646 = x3640 == 0;
if (x3646) { int32_t x3647 = x3635; bool x3648 = x3647 == 640; if (x3648) { } else { assert(false && "must same size!!"); } } else { }
int32_t x3655 = 0; int32_t x3656 = 1;
x3656 *= 64; x3656 *= 10; x3656 *= 1; x3656 *= 1;
int32_t x3661 = x3655; bool x3662 = x3661 >= 2;
if (x3662) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { }
bool x3667 = x3661 == 0;
if (x3667) { int32_t x3668 = x3656; bool x3669 = x3668 == 640; if (x3669) { } else { assert(false && "must same size!!"); } } else { }
float* x3676 = (float*)myMalloc(1 * sizeof(float));
x3676[0] = 1.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 10, 1, 1));

CUDNN_CALL(cudnnSoftmaxBackward(
    cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL,
    x3676, x_desc, x3510, x_desc, x3531,
    x3676, x_desc, x3479));
};
float* x3679 = (float*)myMalloc(1 * sizeof(float));
x3679[0] = 1.0f;
{
cudnnTensorDescriptor_t grad_bias_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    1, 10, 1, 1));

cudnnTensorDescriptor_t grad_out_desc;
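// Gradient of the classifier bias: cudnnConvolutionBackwardBias below sums dL/dlogits (x3479,
// produced by cudnnSoftmaxBackward above) over the batch and accumulates the result into x1155.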
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3679, grad_out_desc, x3479, x3679, grad_bias_desc, x1155)); }; // backprop for gemm List(Const(64), Sym(3469)), Vector(Const(10), Const(2048)) float* x3683 = (float*)myMalloc(1 * sizeof(float));; x3683[0] = 1.0f; float* x3685 = (float*)myMalloc(1 * sizeof(float));; x3685[0] = 1.0f; // backprop of gemm int32_t x3469 = x3442 / x3468; CUBLAS_CALL(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, x3469,64,10,x3683,x976,x3469,x3479,10,x3685,x3447,x3469)); CUBLAS_CALL(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, x3469,10,64,x3683,x3445,x3469,x3479,10,x3685,x1334,x3469)); float* x3690 = (float*)myMalloc(1 * sizeof(float));; x3690[0] = 0.0f; float* x3692 = (float*)myMalloc(1 * sizeof(float));; x3692[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3439, x3439)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 1, 1 )); CUDNN_CALL(cudnnPoolingBackward( cudnnHandle, poolingDesc, x3692, out_desc, x3445, out_desc, x3447, in_desc, x3400 , x3690, in_desc, x3408)); }; float* x3695 = (float*)myMalloc(1 * sizeof(float));; x3695[0] = 1.0f; float* x3697 = (float*)myMalloc(1 * sizeof(float));; x3697[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3695, x_desc, x3400, x_desc, x3408, x_desc, x3400, x3697, x_desc, x3408)); }; if (x3701) { if (x3704) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3387) x Sym(3387), res: x Const(64) x Const(2048) x Sym(3270) x Sym(3270)"); } float* x3709 = (float*)myMalloc(1 * sizeof(float));; x3709[0] = 1.0f; float* x3711 = (float*)myMalloc(1 * sizeof(float));; x3711[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3709, bias_desc, x3408, x3711, out_desc, x3291)); }; } else { float* x3715 = (float*)myMalloc(1 * sizeof(float));; x3715[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 
64, 2048, x3270, x3270)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3715, grad_out_desc, x3408, x3715, grad_bias_desc, x3291)); }; } float* x3720 = (float*)myMalloc(1 * sizeof(float));; x3720[0] = 0.0f; float* x3722 = (float*)myMalloc(1 * sizeof(float));; x3722[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3722, x3722, x3722, x3722, in_desc, x3393, out_desc, x3408, in_desc, x3399, sbmv_desc, x604, x1210,x1288, 1.0E-5, x3401, x3402)); }; // conv2D back-propagate float* x3726 = (float*)myMalloc(1 * sizeof(float));; x3726[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
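// Query the workspace for the chosen backward-data algorithm, then propagate the gradient of the
// 1x1 expansion convolution (weights x394) from its 2048-channel output gradient (x3399) back to
// its 512-channel input gradient (x3374).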
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3726, filt_desc, x394, grad_out_desc, x3399, conv_desc, algo, ws_data, ws_size, x3726, grad_in_desc, x3374)); }; float* x3729 = (float*)myMalloc(1 * sizeof(float));; x3729[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3729, in_desc, x3366, grad_out_desc, x3399, conv_desc, algo, ws_data, ws_size, x3729, grad_filt_desc, x1140)); }; float* x3732 = (float*)myMalloc(1 * sizeof(float));; x3732[0] = 1.0f; float* x3734 = (float*)myMalloc(1 * sizeof(float));; x3734[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3732, x_desc, x3366, x_desc, x3374, x_desc, x3366, x3734, x_desc, x3374)); }; float* x3737 = (float*)myMalloc(1 * sizeof(float));; x3737[0] = 0.0f; float* x3739 = (float*)myMalloc(1 * sizeof(float));; x3739[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3739, x3739, x3739, x3739, in_desc, x3359, 
out_desc, x3374, in_desc, x3365, sbmv_desc, x877, x1301,x1276, 1.0E-5, x3367, x3368)); }; // conv2D back-propagate float* x3743 = (float*)myMalloc(1 * sizeof(float));; x3743[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3743, filt_desc, x379, grad_out_desc, x3365, conv_desc, algo, ws_data, ws_size, x3743, grad_in_desc, x3338)); }; float* x3746 = (float*)myMalloc(1 * sizeof(float));; x3746[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
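// Query the workspace, then accumulate the weight gradient of the 3x3 convolution (512 -> 512)
// into x1135, using the saved input activation x3330 and the incoming output gradient x3365.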
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3746, in_desc, x3330, grad_out_desc, x3365, conv_desc, algo, ws_data, ws_size, x3746, grad_filt_desc, x1135)); }; float* x3749 = (float*)myMalloc(1 * sizeof(float));; x3749[0] = 1.0f; float* x3751 = (float*)myMalloc(1 * sizeof(float));; x3751[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3749, x_desc, x3330, x_desc, x3338, x_desc, x3330, x3751, x_desc, x3338)); }; float* x3754 = (float*)myMalloc(1 * sizeof(float));; x3754[0] = 0.0f; float* x3756 = (float*)myMalloc(1 * sizeof(float));; x3756[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3756, x3756, x3756, x3756, in_desc, x3323, out_desc, x3338, in_desc, x3329, sbmv_desc, x340, x1122,x1185, 1.0E-5, x3331, x3332)); }; // conv2D back-propagate float* x3760 = (float*)myMalloc(1 * sizeof(float));; x3760[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3760, filt_desc, x1090, grad_out_desc, x3329, conv_desc, algo, ws_data, ws_size, x3760, grad_in_desc, x3291)); }; float* x3763 = (float*)myMalloc(1 * sizeof(float));; x3763[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3763, in_desc, x3283, grad_out_desc, x3329, conv_desc, algo, ws_data, ws_size, x3763, grad_filt_desc, x1372)); }; float* x3766 = (float*)myMalloc(1 * sizeof(float));; x3766[0] = 1.0f; float* x3768 = (float*)myMalloc(1 * sizeof(float));; x3768[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3766, x_desc, x3283, x_desc, x3291, x_desc, x3283, x3768, x_desc, x3291)); }; if (x3772) { if (x3774) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3270) x Sym(3270), res: x Const(64) x Const(2048) x Sym(3127) x Sym(3127)"); } float* x3779 = (float*)myMalloc(1 * sizeof(float));; x3779[0] = 1.0f; float* x3781 = (float*)myMalloc(1 * sizeof(float));; x3781[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3779, bias_desc, x3291, x3781, out_desc, x3148)); }; } 
else { float* x3785 = (float*)myMalloc(1 * sizeof(float));; x3785[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3785, grad_out_desc, x3291, x3785, grad_bias_desc, x3148)); }; } float* x3790 = (float*)myMalloc(1 * sizeof(float));; x3790[0] = 0.0f; float* x3792 = (float*)myMalloc(1 * sizeof(float));; x3792[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3792, x3792, x3792, x3792, in_desc, x3276, out_desc, x3291, in_desc, x3282, sbmv_desc, x577, x1201,x1251, 1.0E-5, x3284, x3285)); }; // conv2D back-propagate float* x3796 = (float*)myMalloc(1 * sizeof(float));; x3796[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
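// Backward-data through the 1x1 expansion convolution (512 -> 2048, weights x436) of the
// preceding bottleneck block: output gradient x3282 is mapped back to the 512-channel input
// gradient x3257.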
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3796, filt_desc, x436, grad_out_desc, x3282, conv_desc, algo, ws_data, ws_size, x3796, grad_in_desc, x3257)); }; float* x3799 = (float*)myMalloc(1 * sizeof(float));; x3799[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3799, in_desc, x3249, grad_out_desc, x3282, conv_desc, algo, ws_data, ws_size, x3799, grad_filt_desc, x1154)); }; float* x3802 = (float*)myMalloc(1 * sizeof(float));; x3802[0] = 1.0f; float* x3804 = (float*)myMalloc(1 * sizeof(float));; x3804[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3802, x_desc, x3249, x_desc, x3257, x_desc, x3249, x3804, x_desc, x3257)); }; float* x3807 = (float*)myMalloc(1 * sizeof(float));; x3807[0] = 0.0f; float* x3809 = (float*)myMalloc(1 * sizeof(float));; x3809[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3809, x3809, x3809, x3809, in_desc, x3242, 
out_desc, x3257, in_desc, x3248, sbmv_desc, x775, x1267,x1173, 1.0E-5, x3250, x3251)); }; // conv2D back-propagate float* x3813 = (float*)myMalloc(1 * sizeof(float));; x3813[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3813, filt_desc, x760, grad_out_desc, x3248, conv_desc, algo, ws_data, ws_size, x3813, grad_in_desc, x3221)); }; float* x3816 = (float*)myMalloc(1 * sizeof(float));; x3816[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
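// Query the workspace, then accumulate the weight gradient of this block's 3x3 convolution
// (512 -> 512) into x1262, from the saved input activation x3213 and the output gradient x3248.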
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3816, in_desc, x3213, grad_out_desc, x3248, conv_desc, algo, ws_data, ws_size, x3816, grad_filt_desc, x1262)); }; float* x3819 = (float*)myMalloc(1 * sizeof(float));; x3819[0] = 1.0f; float* x3821 = (float*)myMalloc(1 * sizeof(float));; x3821[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3819, x_desc, x3213, x_desc, x3221, x_desc, x3213, x3821, x_desc, x3221)); }; float* x3824 = (float*)myMalloc(1 * sizeof(float));; x3824[0] = 0.0f; float* x3826 = (float*)myMalloc(1 * sizeof(float));; x3826[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3826, x3826, x3826, x3826, in_desc, x3206, out_desc, x3221, in_desc, x3212, sbmv_desc, x433, x1153,x1244, 1.0E-5, x3214, x3215)); }; // conv2D back-propagate float* x3830 = (float*)myMalloc(1 * sizeof(float));; x3830[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
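// Backward-data through the 1x1 reduce convolution (2048 -> 512, weights x940). With beta = 1.0
// the result is accumulated into x3148, which appears to already hold the gradient contributed
// through the shortcut (residual) path of this block.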
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3830, filt_desc, x940, grad_out_desc, x3212, conv_desc, algo, ws_data, ws_size, x3830, grad_in_desc, x3148)); }; float* x3833 = (float*)myMalloc(1 * sizeof(float));; x3833[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3833, in_desc, x3140, grad_out_desc, x3212, conv_desc, algo, ws_data, ws_size, x3833, grad_filt_desc, x1322)); }; float* x3836 = (float*)myMalloc(1 * sizeof(float));; x3836[0] = 1.0f; float* x3838 = (float*)myMalloc(1 * sizeof(float));; x3838[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3836, x_desc, x3140, x_desc, x3148, x_desc, x3140, x3838, x_desc, x3148)); }; if (x3842) { if (x3844) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3127) x Sym(3127), res: x Const(64) x Const(2048) x Sym(3153) x Sym(3153)"); } float* x3849 = (float*)myMalloc(1 * sizeof(float));; x3849[0] = 1.0f; float* x3851 = (float*)myMalloc(1 * sizeof(float));; x3851[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3849, bias_desc, x3148, x3851, out_desc, x3174)); }; } else 
{ float* x3855 = (float*)myMalloc(1 * sizeof(float));; x3855[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3855, grad_out_desc, x3148, x3855, grad_bias_desc, x3174)); }; } float* x3860 = (float*)myMalloc(1 * sizeof(float));; x3860[0] = 0.0f; float* x3862 = (float*)myMalloc(1 * sizeof(float));; x3862[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3862, x3862, x3862, x3862, in_desc, x3159, out_desc, x3174, in_desc, x3165, sbmv_desc, x814, x1280,x1214, 1.0E-5, x3167, x3168)); }; // conv2D back-propagate float* x3866 = (float*)myMalloc(1 * sizeof(float));; x3866[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
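// Backward-data through what appears to be the stride-2 1x1 projection shortcut of this stage
// (1024 -> 2048 channels, weights x937): its output gradient x3165 is pushed back to the
// 1024-channel input and accumulated into x3031.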
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3866, filt_desc, x937, grad_out_desc, x3165, conv_desc, algo, ws_data, ws_size, x3866, grad_in_desc, x3031)); }; float* x3869 = (float*)myMalloc(1 * sizeof(float));; x3869[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3869, in_desc, x3023, grad_out_desc, x3165, conv_desc, algo, ws_data, ws_size, x3869, grad_filt_desc, x1321)); }; float* x3872 = (float*)myMalloc(1 * sizeof(float));; x3872[0] = 0.0f; float* x3874 = (float*)myMalloc(1 * sizeof(float));; x3874[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3874, x3874, x3874, x3874, in_desc, x3133, out_desc, x3148, in_desc, x3139, sbmv_desc, x1012, x1346,x1169, 1.0E-5, x3141, x3142)); }; // conv2D back-propagate float* x3878 = (float*)myMalloc(1 * sizeof(float));; x3878[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); 
CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3878, filt_desc, x931, grad_out_desc, x3139, conv_desc, algo, ws_data, ws_size, x3878, grad_in_desc, x3114)); }; float* x3881 = (float*)myMalloc(1 * sizeof(float));; x3881[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
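// Query the workspace, then run backward-data through the 1x1 expansion convolution
// (512 -> 2048, weights x931): output gradient x3139 is mapped to input gradient x3114.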
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3881, in_desc, x3106, grad_out_desc, x3139, conv_desc, algo, ws_data, ws_size, x3881, grad_filt_desc, x1319)); }; float* x3884 = (float*)myMalloc(1 * sizeof(float));; x3884[0] = 1.0f; float* x3886 = (float*)myMalloc(1 * sizeof(float));; x3886[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3884, x_desc, x3106, x_desc, x3114, x_desc, x3106, x3886, x_desc, x3114)); }; float* x3889 = (float*)myMalloc(1 * sizeof(float));; x3889[0] = 0.0f; float* x3891 = (float*)myMalloc(1 * sizeof(float));; x3891[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3891, x3891, x3891, x3891, in_desc, x3099, out_desc, x3114, in_desc, x3105, sbmv_desc, x910, x1312,x1266, 1.0E-5, x3107, x3108)); }; // conv2D back-propagate float* x3895 = (float*)myMalloc(1 * sizeof(float));; x3895[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3895, filt_desc, x397, grad_out_desc, x3105, conv_desc, algo, ws_data, ws_size, x3895, grad_in_desc, x3078)); }; float* x3898 = (float*)myMalloc(1 * sizeof(float));; x3898[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3898, in_desc, x3070, grad_out_desc, x3105, conv_desc, algo, ws_data, ws_size, x3898, grad_filt_desc, x1141)); }; float* x3901 = (float*)myMalloc(1 * sizeof(float));; x3901[0] = 1.0f; float* x3903 = (float*)myMalloc(1 * sizeof(float));; x3903[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3901, x_desc, x3070, x_desc, x3078, x_desc, x3070, x3903, x_desc, x3078)); }; float* x3906 = (float*)myMalloc(1 * sizeof(float));; x3906[0] = 0.0f; float* x3908 = (float*)myMalloc(1 * sizeof(float));; x3908[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3908, x3908, x3908, x3908, in_desc, x3063, out_desc, 
x3078, in_desc, x3069, sbmv_desc, x898, x1308,x1331, 1.0E-5, x3071, x3072)); }; // conv2D back-propagate float* x3912 = (float*)myMalloc(1 * sizeof(float));; x3912[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3912, filt_desc, x712, grad_out_desc, x3069, conv_desc, algo, ws_data, ws_size, x3912, grad_in_desc, x3031)); }; float* x3915 = (float*)myMalloc(1 * sizeof(float));; x3915[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
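// Every conv_desc here is switched to CUDNN_TENSOR_OP_MATH, which merely *requests*
// Tensor Core math where the chosen algorithm and data types support it; with the
// pure FP32 tensors used in this file this is typically a no-op and cuDNN falls back
// to its default math path (behavioral note, not something the results depend on).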
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3915, in_desc, x3023, grad_out_desc, x3069, conv_desc, algo, ws_data, ws_size, x3915, grad_filt_desc, x1246)); }; float* x3918 = (float*)myMalloc(1 * sizeof(float));; x3918[0] = 1.0f; float* x3920 = (float*)myMalloc(1 * sizeof(float));; x3920[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3918, x_desc, x3023, x_desc, x3031, x_desc, x3023, x3920, x_desc, x3031)); }; if (x3924) { if (x3927) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(3010) x Sym(3010), res: x Const(64) x Const(1024) x Sym(2893) x Sym(2893)"); } float* x3932 = (float*)myMalloc(1 * sizeof(float));; x3932[0] = 1.0f; float* x3934 = (float*)myMalloc(1 * sizeof(float));; x3934[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3932, bias_desc, x3031, x3934, out_desc, x2914)); }; } else { float* x3938 = (float*)myMalloc(1 * sizeof(float));; x3938[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3938, grad_out_desc, x3031, x3938, grad_bias_desc, x2914)); }; } float* x3943 = (float*)myMalloc(1 * sizeof(float));; x3943[0] = 0.0f; float* x3945 = (float*)myMalloc(1 * sizeof(float));; x3945[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3945, x3945, x3945, x3945, in_desc, x3016, out_desc, x3031, in_desc, x3022, sbmv_desc, x1039, x1355,x1200, 1.0E-5, x3024, x3025)); }; // conv2D back-propagate float* x3949 = 
(float*)myMalloc(1 * sizeof(float));; x3949[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3949, filt_desc, x586, grad_out_desc, x3022, conv_desc, algo, ws_data, ws_size, x3949, grad_in_desc, x2997)); }; float* x3952 = (float*)myMalloc(1 * sizeof(float));; x3952[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
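// Structural note for this region: the code walks backward through what appears to be
// a bottleneck residual unit (1x1 reduce -> 3x3 -> 1x1 expand). Each convolution gets
// a cudnnConvolutionBackwardData / cudnnConvolutionBackwardFilter pair, followed by
// cudnnBatchNormalizationBackward (CUDNN_BATCHNORM_SPATIAL, eps = 1.0E-5, reusing the
// statistics saved during the forward pass) and cudnnActivationBackward for the ReLU.
// The branches guarded by flags such as x3924/x3995 handle the gradient of the
// skip/add: when the two spatial shapes match, the gradient is added into the residual
// branch with cudnnAddTensor; otherwise it is reduced over the broadcast dimensions
// with cudnnConvolutionBackwardBias, and an unexpected shape trips the assert.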
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3952, in_desc, x2989, grad_out_desc, x3022, conv_desc, algo, ws_data, ws_size, x3952, grad_filt_desc, x1204)); }; float* x3955 = (float*)myMalloc(1 * sizeof(float));; x3955[0] = 1.0f; float* x3957 = (float*)myMalloc(1 * sizeof(float));; x3957[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3955, x_desc, x2989, x_desc, x2997, x_desc, x2989, x3957, x_desc, x2997)); }; float* x3960 = (float*)myMalloc(1 * sizeof(float));; x3960[0] = 0.0f; float* x3962 = (float*)myMalloc(1 * sizeof(float));; x3962[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3962, x3962, x3962, x3962, in_desc, x2982, out_desc, x2997, in_desc, x2988, sbmv_desc, x718, x1248,x1296, 1.0E-5, x2990, x2991)); }; // conv2D back-propagate float* x3966 = (float*)myMalloc(1 * sizeof(float));; x3966[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3966, filt_desc, x973, grad_out_desc, x2988, conv_desc, algo, ws_data, ws_size, x3966, grad_in_desc, x2961)); }; float* x3969 = (float*)myMalloc(1 * sizeof(float));; x3969[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3969, in_desc, x2953, grad_out_desc, x2988, conv_desc, algo, ws_data, ws_size, x3969, grad_filt_desc, x1333)); }; float* x3972 = (float*)myMalloc(1 * sizeof(float));; x3972[0] = 1.0f; float* x3974 = (float*)myMalloc(1 * sizeof(float));; x3974[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3972, x_desc, x2953, x_desc, x2961, x_desc, x2953, x3974, x_desc, x2961)); }; float* x3977 = (float*)myMalloc(1 * sizeof(float));; x3977[0] = 0.0f; float* x3979 = (float*)myMalloc(1 * sizeof(float));; x3979[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3979, x3979, x3979, x3979, in_desc, x2946, out_desc, 
x2961, in_desc, x2952, sbmv_desc, x550, x1192,x1360, 1.0E-5, x2954, x2955)); }; // conv2D back-propagate float* x3983 = (float*)myMalloc(1 * sizeof(float));; x3983[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3983, filt_desc, x748, grad_out_desc, x2952, conv_desc, algo, ws_data, ws_size, x3983, grad_in_desc, x2914)); }; float* x3986 = (float*)myMalloc(1 * sizeof(float));; x3986[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3986, in_desc, x2906, grad_out_desc, x2952, conv_desc, algo, ws_data, ws_size, x3986, grad_filt_desc, x1258)); }; float* x3989 = (float*)myMalloc(1 * sizeof(float));; x3989[0] = 1.0f; float* x3991 = (float*)myMalloc(1 * sizeof(float));; x3991[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3989, x_desc, x2906, x_desc, x2914, x_desc, x2906, x3991, x_desc, x2914)); }; if (x3995) { if (x3997) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2893) x Sym(2893), res: x Const(64) x Const(1024) x Sym(2776) x Sym(2776)"); } float* x4002 = (float*)myMalloc(1 * sizeof(float));; x4002[0] = 1.0f; float* x4004 = (float*)myMalloc(1 * sizeof(float));; x4004[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4002, bias_desc, x2914, x4004, out_desc, x2797)); }; } else { float* x4008 = (float*)myMalloc(1 * sizeof(float));; x4008[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4008, grad_out_desc, x2914, x4008, grad_bias_desc, x2797)); }; } float* x4013 = (float*)myMalloc(1 * sizeof(float));; x4013[0] = 0.0f; float* x4015 = (float*)myMalloc(1 * sizeof(float));; x4015[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4015, x4015, x4015, x4015, in_desc, x2899, out_desc, x2914, in_desc, x2905, sbmv_desc, x472, x1166,x1227, 1.0E-5, x2907, x2908)); }; // conv2D back-propagate float* x4019 = (float*)myMalloc(1 
* sizeof(float));; x4019[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4019, filt_desc, x958, grad_out_desc, x2905, conv_desc, algo, ws_data, ws_size, x4019, grad_in_desc, x2880)); }; float* x4022 = (float*)myMalloc(1 * sizeof(float));; x4022[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4022, in_desc, x2872, grad_out_desc, x2905, conv_desc, algo, ws_data, ws_size, x4022, grad_filt_desc, x1328)); }; float* x4025 = (float*)myMalloc(1 * sizeof(float));; x4025[0] = 1.0f; float* x4027 = (float*)myMalloc(1 * sizeof(float));; x4027[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4025, x_desc, x2872, x_desc, x2880, x_desc, x2872, x4027, x_desc, x2880)); }; float* x4030 = (float*)myMalloc(1 * sizeof(float));; x4030[0] = 0.0f; float* x4032 = (float*)myMalloc(1 * sizeof(float));; x4032[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4032, x4032, x4032, x4032, in_desc, x2865, out_desc, x2880, in_desc, x2871, sbmv_desc, x799, x1275,x1216, 1.0E-5, x2873, x2874)); }; // conv2D back-propagate float* x4036 = (float*)myMalloc(1 * sizeof(float));; x4036[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
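// Descriptor lifetime: each block re-creates its tensor/filter/convolution descriptors
// from scratch (cudnnCreate*Descriptor + cudnnSet*Descriptor), and no matching
// cudnnDestroy*Descriptor calls appear in this section, so the handles are either
// leaked per iteration or reclaimed elsewhere. This is a property of the code
// generator rather than something the computed gradients depend on.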
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4036, filt_desc, x1081, grad_out_desc, x2871, conv_desc, algo, ws_data, ws_size, x4036, grad_in_desc, x2844)); }; float* x4039 = (float*)myMalloc(1 * sizeof(float));; x4039[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4039, in_desc, x2836, grad_out_desc, x2871, conv_desc, algo, ws_data, ws_size, x4039, grad_filt_desc, x1369)); }; float* x4042 = (float*)myMalloc(1 * sizeof(float));; x4042[0] = 1.0f; float* x4044 = (float*)myMalloc(1 * sizeof(float));; x4044[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4042, x_desc, x2836, x_desc, x2844, x_desc, x2836, x4044, x_desc, x2844)); }; float* x4047 = (float*)myMalloc(1 * sizeof(float));; x4047[0] = 0.0f; float* x4049 = (float*)myMalloc(1 * sizeof(float));; x4049[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4049, x4049, x4049, x4049, in_desc, x2829, out_desc, 
x2844, in_desc, x2835, sbmv_desc, x526, x1184,x1292, 1.0E-5, x2837, x2838)); }; // conv2D back-propagate float* x4053 = (float*)myMalloc(1 * sizeof(float));; x4053[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4053, filt_desc, x361, grad_out_desc, x2835, conv_desc, algo, ws_data, ws_size, x4053, grad_in_desc, x2797)); }; float* x4056 = (float*)myMalloc(1 * sizeof(float));; x4056[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4056, in_desc, x2789, grad_out_desc, x2835, conv_desc, algo, ws_data, ws_size, x4056, grad_filt_desc, x1129)); }; float* x4059 = (float*)myMalloc(1 * sizeof(float));; x4059[0] = 1.0f; float* x4061 = (float*)myMalloc(1 * sizeof(float));; x4061[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4059, x_desc, x2789, x_desc, x2797, x_desc, x2789, x4061, x_desc, x2797)); }; if (x4065) { if (x4067) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2776) x Sym(2776), res: x Const(64) x Const(1024) x Sym(2659) x Sym(2659)"); } float* x4072 = (float*)myMalloc(1 * sizeof(float));; x4072[0] = 1.0f; float* x4074 = (float*)myMalloc(1 * sizeof(float));; x4074[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4072, bias_desc, x2797, x4074, out_desc, x2680)); }; } else { float* x4078 = (float*)myMalloc(1 * sizeof(float));; x4078[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4078, grad_out_desc, x2797, x4078, grad_bias_desc, x2680)); }; } float* x4083 = (float*)myMalloc(1 * sizeof(float));; x4083[0] = 0.0f; float* x4085 = (float*)myMalloc(1 * sizeof(float));; x4085[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4085, x4085, x4085, x4085, in_desc, x2782, out_desc, x2797, in_desc, x2788, sbmv_desc, x1009, x1345,x1253, 1.0E-5, x2790, x2791)); }; // conv2D back-propagate float* x4089 = 
(float*)myMalloc(1 * sizeof(float));; x4089[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4089, filt_desc, x562, grad_out_desc, x2788, conv_desc, algo, ws_data, ws_size, x4089, grad_in_desc, x2763)); }; float* x4092 = (float*)myMalloc(1 * sizeof(float));; x4092[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4092, in_desc, x2755, grad_out_desc, x2788, conv_desc, algo, ws_data, ws_size, x4092, grad_filt_desc, x1196)); }; float* x4095 = (float*)myMalloc(1 * sizeof(float));; x4095[0] = 1.0f; float* x4097 = (float*)myMalloc(1 * sizeof(float));; x4097[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4095, x_desc, x2755, x_desc, x2763, x_desc, x2755, x4097, x_desc, x2763)); }; float* x4100 = (float*)myMalloc(1 * sizeof(float));; x4100[0] = 0.0f; float* x4102 = (float*)myMalloc(1 * sizeof(float));; x4102[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4102, x4102, x4102, x4102, in_desc, x2748, out_desc, x2763, in_desc, x2754, sbmv_desc, x517, x1181,x1243, 1.0E-5, x2756, x2757)); }; // conv2D back-propagate float* x4106 = (float*)myMalloc(1 * sizeof(float));; x4106[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
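// The alpha/beta scaling factors (x4095, x4097, and the other 1-element buffers from
// myMalloc) live on the host; cuDNN reads them as host pointers, which assumes the
// handle is in its default CUDN N_POINTER_MODE_HOST -- the handle setup is outside this
// section, so that is an assumption. They are heap-allocated per call and, like the
// descriptors, not freed here.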
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4106, filt_desc, x1042, grad_out_desc, x2754, conv_desc, algo, ws_data, ws_size, x4106, grad_in_desc, x2727)); }; float* x4109 = (float*)myMalloc(1 * sizeof(float));; x4109[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4109, in_desc, x2719, grad_out_desc, x2754, conv_desc, algo, ws_data, ws_size, x4109, grad_filt_desc, x1356)); }; float* x4112 = (float*)myMalloc(1 * sizeof(float));; x4112[0] = 1.0f; float* x4114 = (float*)myMalloc(1 * sizeof(float));; x4114[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4112, x_desc, x2719, x_desc, x2727, x_desc, x2719, x4114, x_desc, x2727)); }; float* x4117 = (float*)myMalloc(1 * sizeof(float));; x4117[0] = 0.0f; float* x4119 = (float*)myMalloc(1 * sizeof(float));; x4119[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4119, x4119, x4119, x4119, in_desc, x2712, out_desc, 
x2727, in_desc, x2718, sbmv_desc, x571, x1199,x1348, 1.0E-5, x2720, x2721)); }; // conv2D back-propagate float* x4123 = (float*)myMalloc(1 * sizeof(float));; x4123[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4123, filt_desc, x313, grad_out_desc, x2718, conv_desc, algo, ws_data, ws_size, x4123, grad_in_desc, x2680)); }; float* x4126 = (float*)myMalloc(1 * sizeof(float));; x4126[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
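// Every cuDNN invocation is wrapped in CUDNN_CALL, presumably a macro defined earlier
// in this generated file (outside this section) that checks the returned cudnnStatus_t
// and aborts on anything other than CUDNN_STATUS_SUCCESS.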
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4126, in_desc, x2672, grad_out_desc, x2718, conv_desc, algo, ws_data, ws_size, x4126, grad_filt_desc, x1113)); }; float* x4129 = (float*)myMalloc(1 * sizeof(float));; x4129[0] = 1.0f; float* x4131 = (float*)myMalloc(1 * sizeof(float));; x4131[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4129, x_desc, x2672, x_desc, x2680, x_desc, x2672, x4131, x_desc, x2680)); }; if (x4135) { if (x4137) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2659) x Sym(2659), res: x Const(64) x Const(1024) x Sym(2542) x Sym(2542)"); } float* x4142 = (float*)myMalloc(1 * sizeof(float));; x4142[0] = 1.0f; float* x4144 = (float*)myMalloc(1 * sizeof(float));; x4144[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4142, bias_desc, x2680, x4144, out_desc, x2563)); }; } else { float* x4148 = (float*)myMalloc(1 * sizeof(float));; x4148[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4148, grad_out_desc, x2680, x4148, grad_bias_desc, x2563)); }; } float* x4153 = (float*)myMalloc(1 * sizeof(float));; x4153[0] = 0.0f; float* x4155 = (float*)myMalloc(1 * sizeof(float));; x4155[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4155, x4155, x4155, x4155, in_desc, x2665, out_desc, x2680, in_desc, x2671, sbmv_desc, x1084, x1370,x1164, 1.0E-5, x2673, x2674)); }; // conv2D back-propagate float* x4159 = 
(float*)myMalloc(1 * sizeof(float));; x4159[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4159, filt_desc, x643, grad_out_desc, x2671, conv_desc, algo, ws_data, ws_size, x4159, grad_in_desc, x2646)); }; float* x4162 = (float*)myMalloc(1 * sizeof(float));; x4162[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
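// The next block accumulates the gradient of the 1024x256x1x1 filter into x1223 (alpha = beta
// = 1 via x4162), back-propagates the ReLU on the 64x256 activation, and then runs
// cudnnBatchNormalizationBackward for the 256-channel batch norm (scale x979, dScale/dBias
// written to x1335/x1299, saved mean/inv-variance x2639/x2640, eps 1e-5).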
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4162, in_desc, x2638, grad_out_desc, x2671, conv_desc, algo, ws_data, ws_size, x4162, grad_filt_desc, x1223)); }; float* x4165 = (float*)myMalloc(1 * sizeof(float));; x4165[0] = 1.0f; float* x4167 = (float*)myMalloc(1 * sizeof(float));; x4167[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4165, x_desc, x2638, x_desc, x2646, x_desc, x2638, x4167, x_desc, x2646)); }; float* x4170 = (float*)myMalloc(1 * sizeof(float));; x4170[0] = 0.0f; float* x4172 = (float*)myMalloc(1 * sizeof(float));; x4172[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4172, x4172, x4172, x4172, in_desc, x2631, out_desc, x2646, in_desc, x2637, sbmv_desc, x979, x1335,x1299, 1.0E-5, x2639, x2640)); }; // conv2D back-propagate float* x4176 = (float*)myMalloc(1 * sizeof(float));; x4176[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4176, filt_desc, x337, grad_out_desc, x2637, conv_desc, algo, ws_data, ws_size, x4176, grad_in_desc, x2610)); }; float* x4179 = (float*)myMalloc(1 * sizeof(float));; x4179[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4179, in_desc, x2602, grad_out_desc, x2637, conv_desc, algo, ws_data, ws_size, x4179, grad_filt_desc, x1121)); }; float* x4182 = (float*)myMalloc(1 * sizeof(float));; x4182[0] = 1.0f; float* x4184 = (float*)myMalloc(1 * sizeof(float));; x4184[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4182, x_desc, x2602, x_desc, x2610, x_desc, x2602, x4184, x_desc, x2610)); }; float* x4187 = (float*)myMalloc(1 * sizeof(float));; x4187[0] = 0.0f; float* x4189 = (float*)myMalloc(1 * sizeof(float));; x4189[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4189, x4189, x4189, x4189, in_desc, x2595, out_desc, 
x2610, in_desc, x2601, sbmv_desc, x682, x1236,x1304, 1.0E-5, x2603, x2604)); }; // conv2D back-propagate float* x4193 = (float*)myMalloc(1 * sizeof(float));; x4193[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4193, filt_desc, x949, grad_out_desc, x2601, conv_desc, algo, ws_data, ws_size, x4193, grad_in_desc, x2563)); }; float* x4196 = (float*)myMalloc(1 * sizeof(float));; x4196[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
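// After the 256x1024x1x1 filter gradient (x1325) and the ReLU backward, the gradient x2563 is
// routed into the bypass buffer x2420: in the branch guarded by x4205 (whose assert documents
// that the shapes must match or broadcast) it is accumulated element-wise with cudnnAddTensor;
// otherwise the fallback reduces it with cudnnConvolutionBackwardBias. The 1024-channel batch
// norm is then back-propagated (scale x355, dScale/dBias into x1127/x1339).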
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4196, in_desc, x2555, grad_out_desc, x2601, conv_desc, algo, ws_data, ws_size, x4196, grad_filt_desc, x1325)); }; float* x4199 = (float*)myMalloc(1 * sizeof(float));; x4199[0] = 1.0f; float* x4201 = (float*)myMalloc(1 * sizeof(float));; x4201[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4199, x_desc, x2555, x_desc, x2563, x_desc, x2555, x4201, x_desc, x2563)); }; if (x4205) { if (x4207) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2542) x Sym(2542), res: x Const(64) x Const(1024) x Sym(2399) x Sym(2399)"); } float* x4212 = (float*)myMalloc(1 * sizeof(float));; x4212[0] = 1.0f; float* x4214 = (float*)myMalloc(1 * sizeof(float));; x4214[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4212, bias_desc, x2563, x4214, out_desc, x2420)); }; } else { float* x4218 = (float*)myMalloc(1 * sizeof(float));; x4218[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4218, grad_out_desc, x2563, x4218, grad_bias_desc, x2420)); }; } float* x4223 = (float*)myMalloc(1 * sizeof(float));; x4223[0] = 0.0f; float* x4225 = (float*)myMalloc(1 * sizeof(float));; x4225[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4225, x4225, x4225, x4225, in_desc, x2548, out_desc, x2563, in_desc, x2554, sbmv_desc, x355, x1127,x1339, 1.0E-5, x2556, x2557)); }; // conv2D back-propagate float* x4229 = (float*)myMalloc(1 
* sizeof(float));; x4229[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4229, filt_desc, x463, grad_out_desc, x2554, conv_desc, algo, ws_data, ws_size, x4229, grad_in_desc, x2529)); }; float* x4232 = (float*)myMalloc(1 * sizeof(float));; x4232[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
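// Descriptor note: the 1x1 convolutions in this section are created with zero padding and unit
// stride (0, 0, 1, 1, 1, 1) while the 3x3 convolutions use padding 1, so each follows the same
// backward recipe and only the tensor shapes differ. The block below accumulates the gradient
// of the 1024x256x1x1 filter into x1163.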
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4232, in_desc, x2521, grad_out_desc, x2554, conv_desc, algo, ws_data, ws_size, x4232, grad_filt_desc, x1163)); }; float* x4235 = (float*)myMalloc(1 * sizeof(float));; x4235[0] = 1.0f; float* x4237 = (float*)myMalloc(1 * sizeof(float));; x4237[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4235, x_desc, x2521, x_desc, x2529, x_desc, x2521, x4237, x_desc, x2529)); }; float* x4240 = (float*)myMalloc(1 * sizeof(float));; x4240[0] = 0.0f; float* x4242 = (float*)myMalloc(1 * sizeof(float));; x4242[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4242, x4242, x4242, x4242, in_desc, x2514, out_desc, x2529, in_desc, x2520, sbmv_desc, x1108, x1378,x1203, 1.0E-5, x2522, x2523)); }; // conv2D back-propagate float* x4246 = (float*)myMalloc(1 * sizeof(float));; x4246[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4246, filt_desc, x388, grad_out_desc, x2520, conv_desc, algo, ws_data, ws_size, x4246, grad_in_desc, x2493)); }; float* x4249 = (float*)myMalloc(1 * sizeof(float));; x4249[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4249, in_desc, x2485, grad_out_desc, x2520, conv_desc, algo, ws_data, ws_size, x4249, grad_filt_desc, x1138)); }; float* x4252 = (float*)myMalloc(1 * sizeof(float));; x4252[0] = 1.0f; float* x4254 = (float*)myMalloc(1 * sizeof(float));; x4254[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4252, x_desc, x2485, x_desc, x2493, x_desc, x2485, x4254, x_desc, x2493)); }; float* x4257 = (float*)myMalloc(1 * sizeof(float));; x4257[0] = 0.0f; float* x4259 = (float*)myMalloc(1 * sizeof(float));; x4259[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4259, x4259, x4259, x4259, in_desc, x2478, out_desc, 
x2493, in_desc, x2484, sbmv_desc, x385, x1137,x1326, 1.0E-5, x2486, x2487)); }; // conv2D back-propagate float* x4263 = (float*)myMalloc(1 * sizeof(float));; x4263[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4263, filt_desc, x334, grad_out_desc, x2484, conv_desc, algo, ws_data, ws_size, x4263, grad_in_desc, x2420)); }; float* x4266 = (float*)myMalloc(1 * sizeof(float));; x4266[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
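// Same pattern one block further back: the 256x1024x1x1 filter gradient goes to x1120, the
// 1024-channel ReLU is back-propagated, x2420 is merged into x2446 through the
// cudnnAddTensor / cudnnConvolutionBackwardBias pair guarded by x4275, and the batch-norm
// backward follows (scale x382, dScale/dBias into x1136/x1327).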
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4266, in_desc, x2412, grad_out_desc, x2484, conv_desc, algo, ws_data, ws_size, x4266, grad_filt_desc, x1120)); }; float* x4269 = (float*)myMalloc(1 * sizeof(float));; x4269[0] = 1.0f; float* x4271 = (float*)myMalloc(1 * sizeof(float));; x4271[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4269, x_desc, x2412, x_desc, x2420, x_desc, x2412, x4271, x_desc, x2420)); }; if (x4275) { if (x4277) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2399) x Sym(2399), res: x Const(64) x Const(1024) x Sym(2425) x Sym(2425)"); } float* x4282 = (float*)myMalloc(1 * sizeof(float));; x4282[0] = 1.0f; float* x4284 = (float*)myMalloc(1 * sizeof(float));; x4284[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4282, bias_desc, x2420, x4284, out_desc, x2446)); }; } else { float* x4288 = (float*)myMalloc(1 * sizeof(float));; x4288[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4288, grad_out_desc, x2420, x4288, grad_bias_desc, x2446)); }; } float* x4293 = (float*)myMalloc(1 * sizeof(float));; x4293[0] = 0.0f; float* x4295 = (float*)myMalloc(1 * sizeof(float));; x4295[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4295, x4295, x4295, x4295, in_desc, x2431, out_desc, x2446, in_desc, x2437, sbmv_desc, x382, x1136,x1327, 1.0E-5, x2439, x2440)); }; // conv2D back-propagate float* x4299 = (float*)myMalloc(1 
* sizeof(float));; x4299[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4299, filt_desc, x520, grad_out_desc, x2437, conv_desc, algo, ws_data, ws_size, x4299, grad_in_desc, x2303)); }; float* x4302 = (float*)myMalloc(1 * sizeof(float));; x4302[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
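// Both backward-data calls that target x2303 accumulate into the same 64x512 gradient buffer:
// the call above goes through the 1024x512x1x1 stride-2 filter x520 (apparently the block's
// downsampling projection), and the 256x512x1x1 filter x835 a few statements below adds the
// main path's contribution. The statements that follow accumulate the filter gradient of the
// stride-2 projection into x1182.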
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4302, in_desc, x2295, grad_out_desc, x2437, conv_desc, algo, ws_data, ws_size, x4302, grad_filt_desc, x1182)); }; float* x4305 = (float*)myMalloc(1 * sizeof(float));; x4305[0] = 0.0f; float* x4307 = (float*)myMalloc(1 * sizeof(float));; x4307[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4307, x4307, x4307, x4307, in_desc, x2405, out_desc, x2420, in_desc, x2411, sbmv_desc, x349, x1125,x1224, 1.0E-5, x2413, x2414)); }; // conv2D back-propagate float* x4311 = (float*)myMalloc(1 * sizeof(float));; x4311[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4311, filt_desc, x1102, grad_out_desc, x2411, conv_desc, algo, ws_data, ws_size, x4311, grad_in_desc, x2386)); }; float* x4314 = (float*)myMalloc(1 * sizeof(float));; x4314[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4314, in_desc, x2378, grad_out_desc, x2411, conv_desc, algo, ws_data, ws_size, x4314, grad_filt_desc, x1376)); }; float* x4317 = (float*)myMalloc(1 * sizeof(float));; x4317[0] = 1.0f; float* x4319 = (float*)myMalloc(1 * sizeof(float));; x4319[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4317, x_desc, x2378, x_desc, x2386, x_desc, x2378, x4319, x_desc, x2386)); }; float* x4322 = (float*)myMalloc(1 * sizeof(float));; x4322[0] = 0.0f; float* x4324 = (float*)myMalloc(1 * sizeof(float));; x4324[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4324, x4324, x4324, x4324, in_desc, x2371, 
out_desc, x2386, in_desc, x2377, sbmv_desc, x619, x1215,x1123, 1.0E-5, x2379, x2380)); }; // conv2D back-propagate float* x4328 = (float*)myMalloc(1 * sizeof(float));; x4328[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4328, filt_desc, x820, grad_out_desc, x2377, conv_desc, algo, ws_data, ws_size, x4328, grad_in_desc, x2350)); }; float* x4331 = (float*)myMalloc(1 * sizeof(float));; x4331[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
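// This is the spatially reducing 3x3 convolution of the block: its descriptor uses padding 1
// with stride 2 (1, 1, 2, 2), unlike the stride-1 3x3 convolutions earlier in the section. Its
// 256x256x3x3 filter gradient is accumulated into x1282 below, followed by the ReLU backward
// and the 256-channel batch-norm backward (scale x1105, dScale/dBias into x1377/x1128).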
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4331, in_desc, x2342, grad_out_desc, x2377, conv_desc, algo, ws_data, ws_size, x4331, grad_filt_desc, x1282)); }; float* x4334 = (float*)myMalloc(1 * sizeof(float));; x4334[0] = 1.0f; float* x4336 = (float*)myMalloc(1 * sizeof(float));; x4336[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4334, x_desc, x2342, x_desc, x2350, x_desc, x2342, x4336, x_desc, x2350)); }; float* x4339 = (float*)myMalloc(1 * sizeof(float));; x4339[0] = 0.0f; float* x4341 = (float*)myMalloc(1 * sizeof(float));; x4341[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4341, x4341, x4341, x4341, in_desc, x2335, out_desc, x2350, in_desc, x2341, sbmv_desc, x1105, x1377,x1128, 1.0E-5, x2343, x2344)); }; // conv2D back-propagate float* x4345 = (float*)myMalloc(1 * sizeof(float));; x4345[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4345, filt_desc, x835, grad_out_desc, x2341, conv_desc, algo, ws_data, ws_size, x4345, grad_in_desc, x2303)); }; float* x4348 = (float*)myMalloc(1 * sizeof(float));; x4348[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4348, in_desc, x2295, grad_out_desc, x2341, conv_desc, algo, ws_data, ws_size, x4348, grad_filt_desc, x1287)); }; float* x4351 = (float*)myMalloc(1 * sizeof(float));; x4351[0] = 1.0f; float* x4353 = (float*)myMalloc(1 * sizeof(float));; x4353[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4351, x_desc, x2295, x_desc, x2303, x_desc, x2295, x4353, x_desc, x2303)); }; if (x4357) { if (x4360) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2282) x Sym(2282), res: x Const(64) x Const(512) x Sym(2165) x Sym(2165)"); } float* x4365 = (float*)myMalloc(1 * sizeof(float));; x4365[0] = 1.0f; float* x4367 = (float*)myMalloc(1 * sizeof(float));; x4367[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4365, bias_desc, x2303, x4367, out_desc, x2186)); }; } else { 
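// else-branch (guard x4357 false): instead of the element-wise cudnnAddTensor above, the
// residual gradient x2303 is reduced into x2186 with cudnnConvolutionBackwardBias.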
float* x4371 = (float*)myMalloc(1 * sizeof(float));; x4371[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4371, grad_out_desc, x2303, x4371, grad_bias_desc, x2186)); }; } float* x4376 = (float*)myMalloc(1 * sizeof(float));; x4376[0] = 0.0f; float* x4378 = (float*)myMalloc(1 * sizeof(float));; x4378[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4378, x4378, x4378, x4378, in_desc, x2288, out_desc, x2303, in_desc, x2294, sbmv_desc, x763, x1263,x1161, 1.0E-5, x2296, x2297)); }; // conv2D back-propagate float* x4382 = (float*)myMalloc(1 * sizeof(float));; x4382[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
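// From here the filters drop to 128 channels (512x128x1x1, 128x128x3x3, 128x512x1x1), i.e. the
// backward pass has apparently reached the narrower bottleneck blocks of the preceding stage;
// the recipe itself is unchanged. The block below propagates the data gradient through the
// 512x128x1x1 filter x460 into x2269 and accumulates its filter gradient into x1162.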
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4382, filt_desc, x460, grad_out_desc, x2294, conv_desc, algo, ws_data, ws_size, x4382, grad_in_desc, x2269)); }; float* x4385 = (float*)myMalloc(1 * sizeof(float));; x4385[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4385, in_desc, x2261, grad_out_desc, x2294, conv_desc, algo, ws_data, ws_size, x4385, grad_filt_desc, x1162)); }; float* x4388 = (float*)myMalloc(1 * sizeof(float));; x4388[0] = 1.0f; float* x4390 = (float*)myMalloc(1 * sizeof(float));; x4390[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4388, x_desc, x2261, x_desc, x2269, x_desc, x2261, x4390, x_desc, x2269)); }; float* x4393 = (float*)myMalloc(1 * sizeof(float));; x4393[0] = 0.0f; float* x4395 = (float*)myMalloc(1 * sizeof(float));; x4395[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4395, x4395, x4395, x4395, in_desc, x2254, out_desc, 
x2269, in_desc, x2260, sbmv_desc, x532, x1186,x1145, 1.0E-5, x2262, x2263)); }; // conv2D back-propagate float* x4399 = (float*)myMalloc(1 * sizeof(float));; x4399[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4399, filt_desc, x790, grad_out_desc, x2260, conv_desc, algo, ws_data, ws_size, x4399, grad_in_desc, x2233)); }; float* x4402 = (float*)myMalloc(1 * sizeof(float));; x4402[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
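// Query the workspace for the (overridden) CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1, allocate it via myGpuMalloc,
// and accumulate the 3x3 (128->128) weight gradient into x1272; alpha and beta both point to 1.0f, so the gradient adds in place.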
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4402, in_desc, x2225, grad_out_desc, x2260, conv_desc, algo, ws_data, ws_size, x4402, grad_filt_desc, x1272)); }; float* x4405 = (float*)myMalloc(1 * sizeof(float));; x4405[0] = 1.0f; float* x4407 = (float*)myMalloc(1 * sizeof(float));; x4407[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4405, x_desc, x2225, x_desc, x2233, x_desc, x2225, x4407, x_desc, x2233)); }; float* x4410 = (float*)myMalloc(1 * sizeof(float));; x4410[0] = 0.0f; float* x4412 = (float*)myMalloc(1 * sizeof(float));; x4412[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4412, x4412, x4412, x4412, in_desc, x2218, out_desc, x2233, in_desc, x2224, sbmv_desc, x412, x1146,x1349, 1.0E-5, x2226, x2227)); }; // conv2D back-propagate float* x4416 = (float*)myMalloc(1 * sizeof(float));; x4416[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
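// Backward-data for the 512->128 1x1 convolution: the input gradient is accumulated into the 512-channel buffer x2186.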
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4416, filt_desc, x691, grad_out_desc, x2224, conv_desc, algo, ws_data, ws_size, x4416, grad_in_desc, x2186)); }; float* x4419 = (float*)myMalloc(1 * sizeof(float));; x4419[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4419, in_desc, x2178, grad_out_desc, x2224, conv_desc, algo, ws_data, ws_size, x4419, grad_filt_desc, x1239)); }; float* x4422 = (float*)myMalloc(1 * sizeof(float));; x4422[0] = 1.0f; float* x4424 = (float*)myMalloc(1 * sizeof(float));; x4424[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4422, x_desc, x2178, x_desc, x2186, x_desc, x2178, x4424, x_desc, x2186)); }; if (x4428) { if (x4430) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2165) x Sym(2165), res: x Const(64) x Const(512) x Sym(2048) x Sym(2048)"); } float* x4435 = (float*)myMalloc(1 * sizeof(float));; x4435[0] = 1.0f; float* x4437 = (float*)myMalloc(1 * sizeof(float));; x4437[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4435, bias_desc, x2186, x4437, out_desc, x2069)); }; } else { 
float* x4441 = (float*)myMalloc(1 * sizeof(float));; x4441[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4441, grad_out_desc, x2186, x4441, grad_bias_desc, x2069)); }; } float* x4446 = (float*)myMalloc(1 * sizeof(float));; x4446[0] = 0.0f; float* x4448 = (float*)myMalloc(1 * sizeof(float));; x4448[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4448, x4448, x4448, x4448, in_desc, x2171, out_desc, x2186, in_desc, x2177, sbmv_desc, x796, x1274,x1189, 1.0E-5, x2179, x2180)); }; // conv2D back-propagate float* x4452 = (float*)myMalloc(1 * sizeof(float));; x4452[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
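// Backward-data for the 128->512 1x1 expansion convolution; dX is accumulated into x2152 (alpha = beta = 1).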
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4452, filt_desc, x418, grad_out_desc, x2177, conv_desc, algo, ws_data, ws_size, x4452, grad_in_desc, x2152)); }; float* x4455 = (float*)myMalloc(1 * sizeof(float));; x4455[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4455, in_desc, x2144, grad_out_desc, x2177, conv_desc, algo, ws_data, ws_size, x4455, grad_filt_desc, x1148)); }; float* x4458 = (float*)myMalloc(1 * sizeof(float));; x4458[0] = 1.0f; float* x4460 = (float*)myMalloc(1 * sizeof(float));; x4460[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4458, x_desc, x2144, x_desc, x2152, x_desc, x2144, x4460, x_desc, x2152)); }; float* x4463 = (float*)myMalloc(1 * sizeof(float));; x4463[0] = 0.0f; float* x4465 = (float*)myMalloc(1 * sizeof(float));; x4465[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4465, x4465, x4465, x4465, in_desc, x2137, out_desc, 
x2152, in_desc, x2143, sbmv_desc, x676, x1234,x1168, 1.0E-5, x2145, x2146)); }; // conv2D back-propagate float* x4469 = (float*)myMalloc(1 * sizeof(float));; x4469[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4469, filt_desc, x868, grad_out_desc, x2143, conv_desc, algo, ws_data, ws_size, x4469, grad_in_desc, x2116)); }; float* x4472 = (float*)myMalloc(1 * sizeof(float));; x4472[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
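// Accumulate the weight gradient of the padded 3x3 (128->128) convolution into x1298.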
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4472, in_desc, x2108, grad_out_desc, x2143, conv_desc, algo, ws_data, ws_size, x4472, grad_filt_desc, x1298)); }; float* x4475 = (float*)myMalloc(1 * sizeof(float));; x4475[0] = 1.0f; float* x4477 = (float*)myMalloc(1 * sizeof(float));; x4477[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4475, x_desc, x2108, x_desc, x2116, x_desc, x2108, x4477, x_desc, x2116)); }; float* x4480 = (float*)myMalloc(1 * sizeof(float));; x4480[0] = 0.0f; float* x4482 = (float*)myMalloc(1 * sizeof(float));; x4482[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4482, x4482, x4482, x4482, in_desc, x2101, out_desc, x2116, in_desc, x2107, sbmv_desc, x430, x1152,x1277, 1.0E-5, x2109, x2110)); }; // conv2D back-propagate float* x4486 = (float*)myMalloc(1 * sizeof(float));; x4486[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
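// Backward-data for the 512->128 1x1 convolution at the block entry; the gradient lands in the 512-channel buffer x2069.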
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4486, filt_desc, x883, grad_out_desc, x2107, conv_desc, algo, ws_data, ws_size, x4486, grad_in_desc, x2069)); }; float* x4489 = (float*)myMalloc(1 * sizeof(float));; x4489[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4489, in_desc, x2061, grad_out_desc, x2107, conv_desc, algo, ws_data, ws_size, x4489, grad_filt_desc, x1303)); }; float* x4492 = (float*)myMalloc(1 * sizeof(float));; x4492[0] = 1.0f; float* x4494 = (float*)myMalloc(1 * sizeof(float));; x4494[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4492, x_desc, x2061, x_desc, x2069, x_desc, x2061, x4494, x_desc, x2069)); }; if (x4498) { if (x4500) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2048) x Sym(2048), res: x Const(64) x Const(512) x Sym(1905) x Sym(1905)"); } float* x4505 = (float*)myMalloc(1 * sizeof(float));; x4505[0] = 1.0f; float* x4507 = (float*)myMalloc(1 * sizeof(float));; x4507[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4505, bias_desc, x2069, x4507, out_desc, x1926)); }; } else { 
float* x4511 = (float*)myMalloc(1 * sizeof(float));; x4511[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4511, grad_out_desc, x2069, x4511, grad_bias_desc, x1926)); }; } float* x4516 = (float*)myMalloc(1 * sizeof(float));; x4516[0] = 0.0f; float* x4518 = (float*)myMalloc(1 * sizeof(float));; x4518[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4518, x4518, x4518, x4518, in_desc, x2054, out_desc, x2069, in_desc, x2060, sbmv_desc, x451, x1159,x1353, 1.0E-5, x2062, x2063)); }; // conv2D back-propagate float* x4522 = (float*)myMalloc(1 * sizeof(float));; x4522[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
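// Backward-data for the 128->512 1x1 expansion convolution, accumulated into x2035.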
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4522, filt_desc, x628, grad_out_desc, x2060, conv_desc, algo, ws_data, ws_size, x4522, grad_in_desc, x2035)); }; float* x4525 = (float*)myMalloc(1 * sizeof(float));; x4525[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4525, in_desc, x2027, grad_out_desc, x2060, conv_desc, algo, ws_data, ws_size, x4525, grad_filt_desc, x1218)); }; float* x4528 = (float*)myMalloc(1 * sizeof(float));; x4528[0] = 1.0f; float* x4530 = (float*)myMalloc(1 * sizeof(float));; x4530[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4528, x_desc, x2027, x_desc, x2035, x_desc, x2027, x4530, x_desc, x2035)); }; float* x4533 = (float*)myMalloc(1 * sizeof(float));; x4533[0] = 0.0f; float* x4535 = (float*)myMalloc(1 * sizeof(float));; x4535[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4535, x4535, x4535, x4535, in_desc, x2020, out_desc, 
x2035, in_desc, x2026, sbmv_desc, x319, x1115,x1202, 1.0E-5, x2028, x2029)); }; // conv2D back-propagate float* x4539 = (float*)myMalloc(1 * sizeof(float));; x4539[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4539, filt_desc, x1000, grad_out_desc, x2026, conv_desc, algo, ws_data, ws_size, x4539, grad_in_desc, x1999)); }; float* x4542 = (float*)myMalloc(1 * sizeof(float));; x4542[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
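// Weight gradient of the 3x3 (128->128) convolution (input x1991, upstream gradient x2026), accumulated into x1342.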
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4542, in_desc, x1991, grad_out_desc, x2026, conv_desc, algo, ws_data, ws_size, x4542, grad_filt_desc, x1342)); }; float* x4545 = (float*)myMalloc(1 * sizeof(float));; x4545[0] = 1.0f; float* x4547 = (float*)myMalloc(1 * sizeof(float));; x4547[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4545, x_desc, x1991, x_desc, x1999, x_desc, x1991, x4547, x_desc, x1999)); }; float* x4550 = (float*)myMalloc(1 * sizeof(float));; x4550[0] = 0.0f; float* x4552 = (float*)myMalloc(1 * sizeof(float));; x4552[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4552, x4552, x4552, x4552, in_desc, x1984, out_desc, x1999, in_desc, x1990, sbmv_desc, x961, x1329,x1124, 1.0E-5, x1992, x1993)); }; // conv2D back-propagate float* x4556 = (float*)myMalloc(1 * sizeof(float));; x4556[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
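// Backward-data for the 512->128 1x1 convolution; dX is accumulated into the 512-channel buffer x1926.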
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4556, filt_desc, x1063, grad_out_desc, x1990, conv_desc, algo, ws_data, ws_size, x4556, grad_in_desc, x1926)); }; float* x4559 = (float*)myMalloc(1 * sizeof(float));; x4559[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4559, in_desc, x1918, grad_out_desc, x1990, conv_desc, algo, ws_data, ws_size, x4559, grad_filt_desc, x1363)); }; float* x4562 = (float*)myMalloc(1 * sizeof(float));; x4562[0] = 1.0f; float* x4564 = (float*)myMalloc(1 * sizeof(float));; x4564[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4562, x_desc, x1918, x_desc, x1926, x_desc, x1918, x4564, x_desc, x1926)); }; if (x4568) { if (x4570) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1905) x Sym(1905), res: x Const(64) x Const(512) x Sym(1931) x Sym(1931)"); } float* x4575 = (float*)myMalloc(1 * sizeof(float));; x4575[0] = 1.0f; float* x4577 = (float*)myMalloc(1 * sizeof(float));; x4577[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4575, bias_desc, x1926, x4577, out_desc, x1952)); }; } else { 
float* x4581 = (float*)myMalloc(1 * sizeof(float));; x4581[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4581, grad_out_desc, x1926, x4581, grad_bias_desc, x1952)); }; } float* x4586 = (float*)myMalloc(1 * sizeof(float));; x4586[0] = 0.0f; float* x4588 = (float*)myMalloc(1 * sizeof(float));; x4588[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4588, x4588, x4588, x4588, in_desc, x1937, out_desc, x1952, in_desc, x1943, sbmv_desc, x916, x1314,x1226, 1.0E-5, x1945, x1946)); }; // conv2D back-propagate float* x4592 = (float*)myMalloc(1 * sizeof(float));; x4592[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
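// Backward-data for the stride-2 256->512 1x1 convolution; its input gradient is accumulated into x1809,
// the same 256-channel buffer the 256->128 branch below accumulates into (this looks like the residual projection path).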
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4592, filt_desc, x1069, grad_out_desc, x1943, conv_desc, algo, ws_data, ws_size, x4592, grad_in_desc, x1809)); }; float* x4595 = (float*)myMalloc(1 * sizeof(float));; x4595[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4595, in_desc, x1801, grad_out_desc, x1943, conv_desc, algo, ws_data, ws_size, x4595, grad_filt_desc, x1365)); }; float* x4598 = (float*)myMalloc(1 * sizeof(float));; x4598[0] = 0.0f; float* x4600 = (float*)myMalloc(1 * sizeof(float));; x4600[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4600, x4600, x4600, x4600, in_desc, x1911, out_desc, x1926, in_desc, x1917, sbmv_desc, x730, x1252,x1317, 1.0E-5, x1919, x1920)); }; // conv2D back-propagate float* x4604 = (float*)myMalloc(1 * sizeof(float));; x4604[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); 
CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4604, filt_desc, x613, grad_out_desc, x1917, conv_desc, algo, ws_data, ws_size, x4604, grad_in_desc, x1892)); }; float* x4607 = (float*)myMalloc(1 * sizeof(float));; x4607[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
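// Weight gradient of the 128->512 1x1 convolution (input x1884, upstream gradient x1917), accumulated into x1213.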
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4607, in_desc, x1884, grad_out_desc, x1917, conv_desc, algo, ws_data, ws_size, x4607, grad_filt_desc, x1213)); }; float* x4610 = (float*)myMalloc(1 * sizeof(float));; x4610[0] = 1.0f; float* x4612 = (float*)myMalloc(1 * sizeof(float));; x4612[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4610, x_desc, x1884, x_desc, x1892, x_desc, x1884, x4612, x_desc, x1892)); }; float* x4615 = (float*)myMalloc(1 * sizeof(float));; x4615[0] = 0.0f; float* x4617 = (float*)myMalloc(1 * sizeof(float));; x4617[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4617, x4617, x4617, x4617, in_desc, x1877, out_desc, x1892, in_desc, x1883, sbmv_desc, x1051, x1359,x1297, 1.0E-5, x1885, x1886)); }; // conv2D back-propagate float* x4621 = (float*)myMalloc(1 * sizeof(float));; x4621[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
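// Backward-data for the stride-2, padded 3x3 (128->128) convolution; accumulated into x1856.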
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4621, filt_desc, x376, grad_out_desc, x1883, conv_desc, algo, ws_data, ws_size, x4621, grad_in_desc, x1856)); }; float* x4624 = (float*)myMalloc(1 * sizeof(float));; x4624[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4624, in_desc, x1848, grad_out_desc, x1883, conv_desc, algo, ws_data, ws_size, x4624, grad_filt_desc, x1134)); }; float* x4627 = (float*)myMalloc(1 * sizeof(float));; x4627[0] = 1.0f; float* x4629 = (float*)myMalloc(1 * sizeof(float));; x4629[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4627, x_desc, x1848, x_desc, x1856, x_desc, x1848, x4629, x_desc, x1856)); }; float* x4632 = (float*)myMalloc(1 * sizeof(float));; x4632[0] = 0.0f; float* x4634 = (float*)myMalloc(1 * sizeof(float));; x4634[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4634, x4634, x4634, x4634, in_desc, x1841, out_desc, 
x1856, in_desc, x1847, sbmv_desc, x547, x1191,x1279, 1.0E-5, x1849, x1850)); }; // conv2D back-propagate float* x4638 = (float*)myMalloc(1 * sizeof(float));; x4638[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4638, filt_desc, x328, grad_out_desc, x1847, conv_desc, algo, ws_data, ws_size, x4638, grad_in_desc, x1809)); }; float* x4641 = (float*)myMalloc(1 * sizeof(float));; x4641[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
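// This stretch of the generated code unwinds what appears to be a stack of ResNet-style
// bottleneck blocks (1x1 / 3x3 / 1x1 convolutions with batch-norm and ReLU); every
// "conv2D back-propagate" header starts one convolution's data/filter gradient pair.
// The block just below finishes the 1x1 (256 -> 128) convolution: it sizes the workspace
// and runs cudnnConvolutionBackwardFilter with alpha = beta = 1.0f, so the filter
// gradient is accumulated into x1118 rather than overwriting it.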
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4641, in_desc, x1801, grad_out_desc, x1847, conv_desc, algo, ws_data, ws_size, x4641, grad_filt_desc, x1118)); }; float* x4644 = (float*)myMalloc(1 * sizeof(float));; x4644[0] = 1.0f; float* x4646 = (float*)myMalloc(1 * sizeof(float));; x4646[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4644, x_desc, x1801, x_desc, x1809, x_desc, x1801, x4646, x_desc, x1809)); }; if (x4650) { if (x4653) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1788) x Sym(1788), res: x Const(64) x Const(256) x Sym(1671) x Sym(1671)"); } float* x4658 = (float*)myMalloc(1 * sizeof(float));; x4658[0] = 1.0f; float* x4660 = (float*)myMalloc(1 * sizeof(float));; x4660[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4658, bias_desc, x1809, x4660, out_desc, x1692)); }; } else { float* x4664 = (float*)myMalloc(1 * sizeof(float));; x4664[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4664, grad_out_desc, x1809, x4664, grad_bias_desc, x1692)); }; } float* x4669 = (float*)myMalloc(1 * sizeof(float));; x4669[0] = 0.0f; float* x4671 = (float*)myMalloc(1 * sizeof(float));; x4671[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4671, x4671, x4671, x4671, in_desc, x1794, out_desc, x1809, in_desc, x1800, sbmv_desc, x406, x1144,x1354, 1.0E-5, x1802, x1803)); }; // conv2D back-propagate float* x4675 = (float*)myMalloc(1 * 
sizeof(float));; x4675[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4675, filt_desc, x556, grad_out_desc, x1800, conv_desc, algo, ws_data, ws_size, x4675, grad_in_desc, x1775)); }; float* x4678 = (float*)myMalloc(1 * sizeof(float));; x4678[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
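// Note on the cudnnBatchNormalizationBackward calls in this region: sbmv_desc is the
// 1xCx1x1 descriptor shared by the scale parameter and the two parameter-gradient
// outputs (dScale, dBias); the trailing arguments are epsilon (1.0E-5) and the mean /
// inverse variance saved by the forward pass; and all four alpha/beta factors are 1.0f,
// so the data gradient and the scale/bias gradients are accumulated into their buffers.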
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4678, in_desc, x1767, grad_out_desc, x1800, conv_desc, algo, ws_data, ws_size, x4678, grad_filt_desc, x1194)); }; float* x4681 = (float*)myMalloc(1 * sizeof(float));; x4681[0] = 1.0f; float* x4683 = (float*)myMalloc(1 * sizeof(float));; x4683[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4681, x_desc, x1767, x_desc, x1775, x_desc, x1767, x4683, x_desc, x1775)); }; float* x4686 = (float*)myMalloc(1 * sizeof(float));; x4686[0] = 0.0f; float* x4688 = (float*)myMalloc(1 * sizeof(float));; x4688[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4688, x4688, x4688, x4688, in_desc, x1760, out_desc, x1775, in_desc, x1766, sbmv_desc, x511, x1179,x1242, 1.0E-5, x1768, x1769)); }; // conv2D back-propagate float* x4692 = (float*)myMalloc(1 * sizeof(float));; x4692[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
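// Algorithm choice pattern: the backward-data algorithm is taken from the
// PREFER_FASTEST heuristic (the explicit ALGO_1 override above is commented out),
// whereas every backward-filter block pins CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 right
// after the query, presumably to keep the weight-gradient reduction deterministic.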
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4692, filt_desc, x514, grad_out_desc, x1766, conv_desc, algo, ws_data, ws_size, x4692, grad_in_desc, x1739)); }; float* x4695 = (float*)myMalloc(1 * sizeof(float));; x4695[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4695, in_desc, x1731, grad_out_desc, x1766, conv_desc, algo, ws_data, ws_size, x4695, grad_filt_desc, x1180)); }; float* x4698 = (float*)myMalloc(1 * sizeof(float));; x4698[0] = 1.0f; float* x4700 = (float*)myMalloc(1 * sizeof(float));; x4700[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4698, x_desc, x1731, x_desc, x1739, x_desc, x1731, x4700, x_desc, x1739)); }; float* x4703 = (float*)myMalloc(1 * sizeof(float));; x4703[0] = 0.0f; float* x4705 = (float*)myMalloc(1 * sizeof(float));; x4705[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4705, x4705, x4705, x4705, in_desc, x1724, out_desc, x1739, 
in_desc, x1730, sbmv_desc, x538, x1188,x1131, 1.0E-5, x1732, x1733)); }; // conv2D back-propagate float* x4709 = (float*)myMalloc(1 * sizeof(float));; x4709[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4709, filt_desc, x745, grad_out_desc, x1730, conv_desc, algo, ws_data, ws_size, x4709, grad_in_desc, x1692)); }; float* x4712 = (float*)myMalloc(1 * sizeof(float));; x4712[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
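// Further below, the if/else on x4721 looks like the backward of the residual addition:
// the gradient accumulated at this block's 256-channel output (x1692) is propagated into
// the gradient of the shortcut input (x1552). When the two shapes match it is added
// elementwise with cudnnAddTensor (beta = 1.0f, i.e. accumulated); otherwise it is
// reduced per channel with cudnnConvolutionBackwardBias. The assert rejects any other
// broadcast pattern.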
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4712, in_desc, x1684, grad_out_desc, x1730, conv_desc, algo, ws_data, ws_size, x4712, grad_filt_desc, x1257)); }; float* x4715 = (float*)myMalloc(1 * sizeof(float));; x4715[0] = 1.0f; float* x4717 = (float*)myMalloc(1 * sizeof(float));; x4717[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4715, x_desc, x1684, x_desc, x1692, x_desc, x1684, x4717, x_desc, x1692)); }; if (x4721) { if (x4723) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1671) x Sym(1671), res: x Const(64) x Const(256) x Sym(1531) x Sym(1531)"); } float* x4728 = (float*)myMalloc(1 * sizeof(float));; x4728[0] = 1.0f; float* x4730 = (float*)myMalloc(1 * sizeof(float));; x4730[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4728, bias_desc, x1692, x4730, out_desc, x1552)); }; } else { float* x4734 = (float*)myMalloc(1 * sizeof(float));; x4734[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4734, grad_out_desc, x1692, x4734, grad_bias_desc, x1552)); }; } float* x4739 = (float*)myMalloc(1 * sizeof(float));; x4739[0] = 0.0f; float* x4741 = (float*)myMalloc(1 * sizeof(float));; x4741[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4741, x4741, x4741, x4741, in_desc, x1677, out_desc, x1692, in_desc, x1683, sbmv_desc, x469, x1165,x1114, 1.0E-5, x1685, x1686)); }; // conv2D back-propagate float* x4745 = (float*)myMalloc(1 * 
sizeof(float));; x4745[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4745, filt_desc, x685, grad_out_desc, x1683, conv_desc, algo, ws_data, ws_size, x4745, grad_in_desc, x1658)); }; float* x4748 = (float*)myMalloc(1 * sizeof(float));; x4748[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
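// The one-element host buffers handed out by myMalloc (1.0f, 0.0f, and later -0.005f)
// are the alpha/beta scaling factors that cuDNN (and cuBLAS in its default host pointer
// mode) dereferences on the CPU: beta = 0.0f overwrites the destination tensor, while
// beta = 1.0f accumulates into it.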
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4748, in_desc, x1650, grad_out_desc, x1683, conv_desc, algo, ws_data, ws_size, x4748, grad_filt_desc, x1237)); }; float* x4751 = (float*)myMalloc(1 * sizeof(float));; x4751[0] = 1.0f; float* x4753 = (float*)myMalloc(1 * sizeof(float));; x4753[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4751, x_desc, x1650, x_desc, x1658, x_desc, x1650, x4753, x_desc, x1658)); }; float* x4756 = (float*)myMalloc(1 * sizeof(float));; x4756[0] = 0.0f; float* x4758 = (float*)myMalloc(1 * sizeof(float));; x4758[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4758, x4758, x4758, x4758, in_desc, x1643, out_desc, x1658, in_desc, x1649, sbmv_desc, x919, x1315,x1260, 1.0E-5, x1651, x1652)); }; // conv2D back-propagate float* x4762 = (float*)myMalloc(1 * sizeof(float));; x4762[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
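// The spatial extents threaded through these descriptors (x1601, x1637, x1671, ...)
// appear to be the Sym(...) values mentioned in the assert messages: they are computed
// at runtime from the forward pass's convolution arithmetic, while the batch size (64)
// and the channel counts are Const(...) constants baked in by the code generator.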
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4762, filt_desc, x544, grad_out_desc, x1649, conv_desc, algo, ws_data, ws_size, x4762, grad_in_desc, x1622)); }; float* x4765 = (float*)myMalloc(1 * sizeof(float));; x4765[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4765, in_desc, x1614, grad_out_desc, x1649, conv_desc, algo, ws_data, ws_size, x4765, grad_filt_desc, x1190)); }; float* x4768 = (float*)myMalloc(1 * sizeof(float));; x4768[0] = 1.0f; float* x4770 = (float*)myMalloc(1 * sizeof(float));; x4770[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4768, x_desc, x1614, x_desc, x1622, x_desc, x1614, x4770, x_desc, x1622)); }; float* x4773 = (float*)myMalloc(1 * sizeof(float));; x4773[0] = 0.0f; float* x4775 = (float*)myMalloc(1 * sizeof(float));; x4775[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4775, x4775, x4775, x4775, in_desc, x1607, out_desc, x1622, 
in_desc, x1613, sbmv_desc, x721, x1249,x1167, 1.0E-5, x1615, x1616)); }; // conv2D back-propagate float* x4779 = (float*)myMalloc(1 * sizeof(float));; x4779[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4779, filt_desc, x808, grad_out_desc, x1613, conv_desc, algo, ws_data, ws_size, x4779, grad_in_desc, x1552)); }; float* x4782 = (float*)myMalloc(1 * sizeof(float));; x4782[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
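// The cudnnActivationBackward calls here run the ReLU backward in place: dx aliases dy
// (with beta = 0.0f) and the same buffer is passed for both x and y, which is valid for
// ReLU because the gradient mask depends only on whether the output is positive.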
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4782, in_desc, x1544, grad_out_desc, x1613, conv_desc, algo, ws_data, ws_size, x4782, grad_filt_desc, x1278)); }; float* x4785 = (float*)myMalloc(1 * sizeof(float));; x4785[0] = 1.0f; float* x4787 = (float*)myMalloc(1 * sizeof(float));; x4787[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4785, x_desc, x1544, x_desc, x1552, x_desc, x1544, x4787, x_desc, x1552)); }; if (x4791) { if (x4793) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1531) x Sym(1531), res: x Const(64) x Const(256) x Sym(1461) x Sym(1461)"); } float* x4798 = (float*)myMalloc(1 * sizeof(float));; x4798[0] = 1.0f; float* x4800 = (float*)myMalloc(1 * sizeof(float));; x4800[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4798, bias_desc, x1552, x4800, out_desc, x1575)); }; } else { float* x4804 = (float*)myMalloc(1 * sizeof(float));; x4804[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4804, grad_out_desc, x1552, x4804, grad_bias_desc, x1575)); }; } float* x4809 = (float*)myMalloc(1 * sizeof(float));; x4809[0] = 0.0f; float* x4811 = (float*)myMalloc(1 * sizeof(float));; x4811[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4811, x4811, x4811, x4811, in_desc, x1560, out_desc, x1575, in_desc, x1566, sbmv_desc, x523, x1183,x1310, 1.0E-5, x1568, x1569)); }; // conv2D back-propagate float* x4815 = (float*)myMalloc(1 * 
sizeof(float));; x4815[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4815, filt_desc, x781, grad_out_desc, x1566, conv_desc, algo, ws_data, ws_size, x4815, grad_in_desc, x1453)); }; float* x4818 = (float*)myMalloc(1 * sizeof(float));; x4818[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4818, in_desc, x1451, grad_out_desc, x1566, conv_desc, algo, ws_data, ws_size, x4818, grad_filt_desc, x1269)); }; float* x4821 = (float*)myMalloc(1 * sizeof(float));; x4821[0] = 0.0f; float* x4823 = (float*)myMalloc(1 * sizeof(float));; x4823[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4823, x4823, x4823, x4823, in_desc, x1537, out_desc, x1552, in_desc, x1543, sbmv_desc, x892, x1306,x1233, 1.0E-5, x1545, x1546)); }; // conv2D back-propagate float* x4827 = (float*)myMalloc(1 * sizeof(float));; x4827[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
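// Each of these blocks re-creates its descriptors, takes a fresh workspace from
// myGpuMalloc and fresh scalars from myMalloc, and never destroys or frees anything
// here; presumably both allocators are simple arenas that the surrounding training loop
// resets between batches.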
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4827, filt_desc, x391, grad_out_desc, x1543, conv_desc, algo, ws_data, ws_size, x4827, grad_in_desc, x1518)); }; float* x4830 = (float*)myMalloc(1 * sizeof(float));; x4830[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4830, in_desc, x1510, grad_out_desc, x1543, conv_desc, algo, ws_data, ws_size, x4830, grad_filt_desc, x1139)); }; float* x4833 = (float*)myMalloc(1 * sizeof(float));; x4833[0] = 1.0f; float* x4835 = (float*)myMalloc(1 * sizeof(float));; x4835[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4833, x_desc, x1510, x_desc, x1518, x_desc, x1510, x4835, x_desc, x1518)); }; float* x4838 = (float*)myMalloc(1 * sizeof(float));; x4838[0] = 0.0f; float* x4840 = (float*)myMalloc(1 * sizeof(float));; x4840[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4840, x4840, x4840, x4840, in_desc, x1503, out_desc, x1518, 
in_desc, x1509, sbmv_desc, x787, x1271,x1156, 1.0E-5, x1511, x1512)); }; // conv2D back-propagate float* x4844 = (float*)myMalloc(1 * sizeof(float));; x4844[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4844, filt_desc, x565, grad_out_desc, x1509, conv_desc, algo, ws_data, ws_size, x4844, grad_in_desc, x1482)); }; float* x4847 = (float*)myMalloc(1 * sizeof(float));; x4847[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4847, in_desc, x1474, grad_out_desc, x1509, conv_desc, algo, ws_data, ws_size, x4847, grad_filt_desc, x1197)); }; float* x4850 = (float*)myMalloc(1 * sizeof(float));; x4850[0] = 1.0f; float* x4852 = (float*)myMalloc(1 * sizeof(float));; x4852[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4850, x_desc, x1474, x_desc, x1482, x_desc, x1474, x4852, x_desc, x1482)); }; float* x4855 = (float*)myMalloc(1 * sizeof(float));; x4855[0] = 0.0f; float* x4857 = (float*)myMalloc(1 * sizeof(float));; x4857[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4857, x4857, x4857, x4857, in_desc, x1467, out_desc, x1482, in_desc, x1473, sbmv_desc, x373, x1133,x1160, 1.0E-5, x1475, x1476)); }; // conv2D back-propagate float* x4861 = (float*)myMalloc(1 * sizeof(float));; x4861[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
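// The data gradient computed just below is written into x1453 with beta = 1.0f, so it
// sums with the contribution that the 1x1 projection branch a few blocks back already
// deposited there. From that buffer the gradient is routed back through the 2x2 /
// stride-2 max pool (cudnnPoolingBackward) and the stem's ReLU and batch-norm, and
// finally only a filter gradient is computed for the first 3x3 convolution on the
// 64 x 3 x 32 x 32 input batch, since no gradient with respect to the input images is
// needed.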
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(
    cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardData(
    cudnnHandle,
    x4861, filt_desc, x994, grad_out_desc, x1473,
    conv_desc, algo, ws_data, ws_size,
    x4861, grad_in_desc, x1453));
};
float* x4864 = (float*)myMalloc(1 * sizeof(float));;
x4864[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
    grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
    64, 64, 1, 1));

cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1461, x1461));

cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1445, x1445));

cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
    conv_desc,
    0, 0, 1, 1, 1, 1,
    CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
    cudnnHandle,
    in_desc, grad_out_desc, conv_desc, grad_filt_desc,
    CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
    cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
    cudnnHandle, x4864, in_desc, x1451, grad_out_desc, x1473,
    conv_desc, algo, ws_data, ws_size,
    x4864, grad_filt_desc, x1340));
};
float* x4867 = (float*)myMalloc(1 * sizeof(float));;
x4867[0] = 0.0f;
float* x4869 = (float*)myMalloc(1 * sizeof(float));;
x4869[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1412, x1412));

cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1445, x1445));

cudnnPoolingDescriptor_t poolingDesc;
CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc));
CUDNN_CALL(cudnnSetPooling2dDescriptor(
    poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN,
    2, 2, 0,
    0, 2, 2
));
CUDNN_CALL(cudnnPoolingBackward(
    cudnnHandle,
    poolingDesc,
    x4869, out_desc, x1451, out_desc, x1453, in_desc, x1425, x4867, in_desc, x1433));
};
float* x4872 = (float*)myMalloc(1 * sizeof(float));;
x4872[0] = 1.0f;
float* x4874 = (float*)myMalloc(1 * sizeof(float));;
x4874[0] = 0.0f;
{
cudnnTensorDescriptor_t x_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1412, x1412));

cudnnActivationDescriptor_t act_desc;
CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc));
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc,
                                        /*mode=*/ CUDNN_ACTIVATION_RELU,
                                        /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0));
CUDNN_CALL(cudnnActivationBackward(
    cudnnHandle, act_desc,
    x4872, x_desc, x1425, x_desc, x1433, x_desc, x1425,
    x4874, x_desc, x1433));
};
float* x4877 = (float*)myMalloc(1 * sizeof(float));;
x4877[0] = 0.0f;
float* x4879 = (float*)myMalloc(1 * sizeof(float));;
x4879[0] = 1.0f;
{
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1412, x1412));

cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1412, x1412));

cudnnTensorDescriptor_t sbmv_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    1, 64, 1, 1));

CUDNN_CALL(cudnnBatchNormalizationBackward(
    cudnnHandle, CUDNN_BATCHNORM_SPATIAL,
    x4879, x4879, x4879, x4879, in_desc, x1418,
    out_desc, x1433, in_desc, x1424, sbmv_desc, x913,
    x1313, x1358, 1.0E-5, x1426, x1427));
};
// conv2D back-propagate
float* x4883 = (float*)myMalloc(1 * sizeof(float));;
x4883[0] = 1.0f;
{
cudnnFilterDescriptor_t grad_filt_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
    grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
    64, 3, 3, 3));

cudnnTensorDescriptor_t grad_out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 64, x1412, x1412));

cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
    in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
    64, 3, 32, 32));

cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(
    conv_desc,
    1, 1, 1, 1, 1, 1,
    CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
// Algorithm.
cudnnConvolutionBwdFilterAlgo_t algo;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm(
    cudnnHandle,
    in_desc, grad_out_desc, conv_desc, grad_filt_desc,
    CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo));
algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
// Workspace.
size_t ws_size;
CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize(
    cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size));
void *ws_data = myGpuMalloc(ws_size);
CUDNN_CALL(cudnnConvolutionBackwardFilter(
    cudnnHandle, x4883, in_desc, x1402, grad_out_desc, x1424,
    conv_desc, algo, ws_data, ws_size,
    x4883, grad_filt_desc, x1259));
};
// Tensor 'toCPU' invocation.
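// End-of-iteration bookkeeping follows. First a single float is copied from the
// device buffer x1410 back to the host and accumulated into the host-side running
// total x1390. Then each parameter tensor is updated in place and its gradient
// buffer is cleared. The update is plain SGD with learning rate 0.005, expressed as
// one hipblasSgeam call per parameter; schematically (W, dW, one, negLR, rows, cols
// and numel are placeholder names, not identifiers used by this generated code):
//
//   one[0] = 1.0f;  negLR[0] = -0.005f;
//   CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, rows, cols,
//                            one,   W,  rows,      //  1.0f   * W
//                            negLR, dW, rows,      // -0.005f * dW
//                            W, rows));            // result written back into W
//   hipLaunchKernelGGL((arrayFill), dim3(28), dim3(512), 0, 0, dW, 0.0f, numel);
//
// i.e. W <- W - 0.005 * dW, followed by zeroing dW so that gradients can be
// re-accumulated in the next training step.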
float* x4887 = (float*)myMalloc(1 * sizeof(float));; CUDA_CALL(hipMemcpy(x4887, x1410, 1 * sizeof(float), hipMemcpyDeviceToHost)); float x4889 = x4887[0]; x1390 += x4889; float* x4891 = (float*)myMalloc(1 * sizeof(float));; x4891[0] = 1.0f; float* x4893 = (float*)myMalloc(1 * sizeof(float));; x4893[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,256,x4891,x313,1024,x4893, x1113, 1024, x313,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1113, 0.0f, 262144); float* x4897 = (float*)myMalloc(1 * sizeof(float));; x4897[0] = 1.0f; float* x4899 = (float*)myMalloc(1 * sizeof(float));; x4899[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x4897,x316,1,x4899, x1114, 1, x316,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1114, 0.0f, 256); float* x4903 = (float*)myMalloc(1 * sizeof(float));; x4903[0] = 1.0f; float* x4905 = (float*)myMalloc(1 * sizeof(float));; x4905[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x4903,x319,1,x4905, x1115, 1, x319,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1115, 0.0f, 128); float* x4909 = (float*)myMalloc(1 * sizeof(float));; x4909[0] = 1.0f; float* x4911 = (float*)myMalloc(1 * sizeof(float));; x4911[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x4909,x322,1,x4911, x1116, 1, x322,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1116, 0.0f, 128); float* x4915 = (float*)myMalloc(1 * sizeof(float));; x4915[0] = 1.0f; float* x4917 = (float*)myMalloc(1 * sizeof(float));; x4917[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x4915,x325,1,x4917, x1117, 1, x325,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1117, 0.0f, 64); float* x4921 = (float*)myMalloc(1 * sizeof(float));; x4921[0] = 1.0f; float* x4923 = (float*)myMalloc(1 * sizeof(float));; x4923[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,128,x4921,x328,256,x4923, x1118, 256, x328,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1118, 0.0f, 32768); float* x4927 = (float*)myMalloc(1 * sizeof(float));; x4927[0] = 1.0f; float* x4929 = (float*)myMalloc(1 * sizeof(float));; x4929[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x4927,x331,1,x4929, x1119, 1, x331,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1119, 0.0f, 512); float* x4933 = (float*)myMalloc(1 * sizeof(float));; x4933[0] = 1.0f; float* x4935 = (float*)myMalloc(1 * sizeof(float));; x4935[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,256,x4933,x334,1024,x4935, x1120, 1024, x334,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1120, 0.0f, 262144); float* x4939 = (float*)myMalloc(1 * sizeof(float));; x4939[0] = 1.0f; float* x4941 = (float*)myMalloc(1 * sizeof(float));; x4941[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2304,256,x4939,x337,2304,x4941, x1121, 2304, x337,2304));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1121, 0.0f, 589824); float* x4945 = (float*)myMalloc(1 * sizeof(float));; x4945[0] = 1.0f; float* x4947 = (float*)myMalloc(1 * sizeof(float));; x4947[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x4945,x340,1,x4947, x1122, 1, x340,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1122, 
0.0f, 512); float* x4951 = (float*)myMalloc(1 * sizeof(float));; x4951[0] = 1.0f; float* x4953 = (float*)myMalloc(1 * sizeof(float));; x4953[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x4951,x343,1,x4953, x1123, 1, x343,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1123, 0.0f, 256); float* x4957 = (float*)myMalloc(1 * sizeof(float));; x4957[0] = 1.0f; float* x4959 = (float*)myMalloc(1 * sizeof(float));; x4959[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x4957,x346,1,x4959, x1124, 1, x346,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1124, 0.0f, 128); float* x4963 = (float*)myMalloc(1 * sizeof(float));; x4963[0] = 1.0f; float* x4965 = (float*)myMalloc(1 * sizeof(float));; x4965[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x4963,x349,1,x4965, x1125, 1, x349,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1125, 0.0f, 1024); float* x4969 = (float*)myMalloc(1 * sizeof(float));; x4969[0] = 1.0f; float* x4971 = (float*)myMalloc(1 * sizeof(float));; x4971[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x4969,x352,1,x4971, x1126, 1, x352,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1126, 0.0f, 512); float* x4975 = (float*)myMalloc(1 * sizeof(float));; x4975[0] = 1.0f; float* x4977 = (float*)myMalloc(1 * sizeof(float));; x4977[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x4975,x355,1,x4977, x1127, 1, x355,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1127, 0.0f, 1024); float* x4981 = (float*)myMalloc(1 * sizeof(float));; x4981[0] = 1.0f; float* x4983 = (float*)myMalloc(1 * sizeof(float));; x4983[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x4981,x358,1,x4983, x1128, 1, x358,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1128, 0.0f, 256); float* x4987 = (float*)myMalloc(1 * sizeof(float));; x4987[0] = 1.0f; float* x4989 = (float*)myMalloc(1 * sizeof(float));; x4989[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,256,x4987,x361,1024,x4989, x1129, 1024, x361,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1129, 0.0f, 262144); float* x4993 = (float*)myMalloc(1 * sizeof(float));; x4993[0] = 1.0f; float* x4995 = (float*)myMalloc(1 * sizeof(float));; x4995[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x4993,x364,1,x4995, x1130, 1, x364,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1130, 0.0f, 512); float* x4999 = (float*)myMalloc(1 * sizeof(float));; x4999[0] = 1.0f; float* x5001 = (float*)myMalloc(1 * sizeof(float));; x5001[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x4999,x367,1,x5001, x1131, 1, x367,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1131, 0.0f, 64); float* x5005 = (float*)myMalloc(1 * sizeof(float));; x5005[0] = 1.0f; float* x5007 = (float*)myMalloc(1 * sizeof(float));; x5007[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5005,x370,1,x5007, x1132, 1, x370,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1132, 0.0f, 512); float* x5011 = (float*)myMalloc(1 * sizeof(float));; x5011[0] = 1.0f; float* x5013 = (float*)myMalloc(1 * sizeof(float));; x5013[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, 
HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5011,x373,1,x5013, x1133, 1, x373,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1133, 0.0f, 64); float* x5017 = (float*)myMalloc(1 * sizeof(float));; x5017[0] = 1.0f; float* x5019 = (float*)myMalloc(1 * sizeof(float));; x5019[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1152,128,x5017,x376,1152,x5019, x1134, 1152, x376,1152));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1134, 0.0f, 147456); float* x5023 = (float*)myMalloc(1 * sizeof(float));; x5023[0] = 1.0f; float* x5025 = (float*)myMalloc(1 * sizeof(float));; x5025[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4608,512,x5023,x379,4608,x5025, x1135, 4608, x379,4608));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1135, 0.0f, 2359296); float* x5029 = (float*)myMalloc(1 * sizeof(float));; x5029[0] = 1.0f; float* x5031 = (float*)myMalloc(1 * sizeof(float));; x5031[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5029,x382,1,x5031, x1136, 1, x382,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1136, 0.0f, 1024); float* x5035 = (float*)myMalloc(1 * sizeof(float));; x5035[0] = 1.0f; float* x5037 = (float*)myMalloc(1 * sizeof(float));; x5037[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5035,x385,1,x5037, x1137, 1, x385,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1137, 0.0f, 256); float* x5041 = (float*)myMalloc(1 * sizeof(float));; x5041[0] = 1.0f; float* x5043 = (float*)myMalloc(1 * sizeof(float));; x5043[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2304,256,x5041,x388,2304,x5043, x1138, 2304, x388,2304));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1138, 0.0f, 589824); float* x5047 = (float*)myMalloc(1 * sizeof(float));; x5047[0] = 1.0f; float* x5049 = (float*)myMalloc(1 * sizeof(float));; x5049[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 64,256,x5047,x391,64,x5049, x1139, 64, x391,64));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1139, 0.0f, 16384); float* x5053 = (float*)myMalloc(1 * sizeof(float));; x5053[0] = 1.0f; float* x5055 = (float*)myMalloc(1 * sizeof(float));; x5055[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,2048,x5053,x394,512,x5055, x1140, 512, x394,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1140, 0.0f, 1048576); float* x5059 = (float*)myMalloc(1 * sizeof(float));; x5059[0] = 1.0f; float* x5061 = (float*)myMalloc(1 * sizeof(float));; x5061[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4608,512,x5059,x397,4608,x5061, x1141, 4608, x397,4608));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1141, 0.0f, 2359296); float* x5065 = (float*)myMalloc(1 * sizeof(float));; x5065[0] = 1.0f; float* x5067 = (float*)myMalloc(1 * sizeof(float));; x5067[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5065,x400,1,x5067, x1142, 1, x400,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1142, 0.0f, 128); float* x5071 = (float*)myMalloc(1 * sizeof(float));; x5071[0] = 1.0f; float* x5073 = (float*)myMalloc(1 * sizeof(float));; x5073[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5071,x403,1,x5073, x1143, 1, x403,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, 
x1143, 0.0f, 256); float* x5077 = (float*)myMalloc(1 * sizeof(float));; x5077[0] = 1.0f; float* x5079 = (float*)myMalloc(1 * sizeof(float));; x5079[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5077,x406,1,x5079, x1144, 1, x406,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1144, 0.0f, 256); float* x5083 = (float*)myMalloc(1 * sizeof(float));; x5083[0] = 1.0f; float* x5085 = (float*)myMalloc(1 * sizeof(float));; x5085[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5083,x409,1,x5085, x1145, 1, x409,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1145, 0.0f, 128); float* x5089 = (float*)myMalloc(1 * sizeof(float));; x5089[0] = 1.0f; float* x5091 = (float*)myMalloc(1 * sizeof(float));; x5091[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5089,x412,1,x5091, x1146, 1, x412,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1146, 0.0f, 128); float* x5095 = (float*)myMalloc(1 * sizeof(float));; x5095[0] = 1.0f; float* x5097 = (float*)myMalloc(1 * sizeof(float));; x5097[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5095,x415,1,x5097, x1147, 1, x415,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1147, 0.0f, 64); float* x5101 = (float*)myMalloc(1 * sizeof(float));; x5101[0] = 1.0f; float* x5103 = (float*)myMalloc(1 * sizeof(float));; x5103[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 128,512,x5101,x418,128,x5103, x1148, 128, x418,128));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1148, 0.0f, 65536); float* x5107 = (float*)myMalloc(1 * sizeof(float));; x5107[0] = 1.0f; float* x5109 = (float*)myMalloc(1 * sizeof(float));; x5109[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5107,x421,1,x5109, x1149, 1, x421,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1149, 0.0f, 512); float* x5113 = (float*)myMalloc(1 * sizeof(float));; x5113[0] = 1.0f; float* x5115 = (float*)myMalloc(1 * sizeof(float));; x5115[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5113,x424,1,x5115, x1150, 1, x424,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1150, 0.0f, 128); float* x5119 = (float*)myMalloc(1 * sizeof(float));; x5119[0] = 1.0f; float* x5121 = (float*)myMalloc(1 * sizeof(float));; x5121[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5119,x427,1,x5121, x1151, 1, x427,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1151, 0.0f, 64); float* x5125 = (float*)myMalloc(1 * sizeof(float));; x5125[0] = 1.0f; float* x5127 = (float*)myMalloc(1 * sizeof(float));; x5127[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5125,x430,1,x5127, x1152, 1, x430,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1152, 0.0f, 128); float* x5131 = (float*)myMalloc(1 * sizeof(float));; x5131[0] = 1.0f; float* x5133 = (float*)myMalloc(1 * sizeof(float));; x5133[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5131,x433,1,x5133, x1153, 1, x433,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1153, 0.0f, 512); float* x5137 = (float*)myMalloc(1 * sizeof(float));; x5137[0] = 1.0f; float* x5139 = (float*)myMalloc(1 * sizeof(float));; x5139[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, 
HIPBLAS_OP_N, HIPBLAS_OP_N, 512,2048,x5137,x436,512,x5139, x1154, 512, x436,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1154, 0.0f, 1048576); float* x5143 = (float*)myMalloc(1 * sizeof(float));; x5143[0] = 1.0f; float* x5145 = (float*)myMalloc(1 * sizeof(float));; x5145[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,10,x5143,x439,1,x5145, x1155, 1, x439,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1155, 0.0f, 10); float* x5149 = (float*)myMalloc(1 * sizeof(float));; x5149[0] = 1.0f; float* x5151 = (float*)myMalloc(1 * sizeof(float));; x5151[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5149,x442,1,x5151, x1156, 1, x442,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1156, 0.0f, 64); float* x5155 = (float*)myMalloc(1 * sizeof(float));; x5155[0] = 1.0f; float* x5157 = (float*)myMalloc(1 * sizeof(float));; x5157[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5155,x445,1,x5157, x1157, 1, x445,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1157, 0.0f, 512); float* x5161 = (float*)myMalloc(1 * sizeof(float));; x5161[0] = 1.0f; float* x5163 = (float*)myMalloc(1 * sizeof(float));; x5163[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5161,x448,1,x5163, x1158, 1, x448,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1158, 0.0f, 64); float* x5167 = (float*)myMalloc(1 * sizeof(float));; x5167[0] = 1.0f; float* x5169 = (float*)myMalloc(1 * sizeof(float));; x5169[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5167,x451,1,x5169, x1159, 1, x451,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1159, 0.0f, 512); float* x5173 = (float*)myMalloc(1 * sizeof(float));; x5173[0] = 1.0f; float* x5175 = (float*)myMalloc(1 * sizeof(float));; x5175[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5173,x454,1,x5175, x1160, 1, x454,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1160, 0.0f, 64); float* x5179 = (float*)myMalloc(1 * sizeof(float));; x5179[0] = 1.0f; float* x5181 = (float*)myMalloc(1 * sizeof(float));; x5181[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5179,x457,1,x5181, x1161, 1, x457,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1161, 0.0f, 512); float* x5185 = (float*)myMalloc(1 * sizeof(float));; x5185[0] = 1.0f; float* x5187 = (float*)myMalloc(1 * sizeof(float));; x5187[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 128,512,x5185,x460,128,x5187, x1162, 128, x460,128));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1162, 0.0f, 65536); float* x5191 = (float*)myMalloc(1 * sizeof(float));; x5191[0] = 1.0f; float* x5193 = (float*)myMalloc(1 * sizeof(float));; x5193[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,1024,x5191,x463,256,x5193, x1163, 256, x463,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1163, 0.0f, 262144); float* x5197 = (float*)myMalloc(1 * sizeof(float));; x5197[0] = 1.0f; float* x5199 = (float*)myMalloc(1 * sizeof(float));; x5199[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5197,x466,1,x5199, x1164, 1, x466,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1164, 0.0f, 1024); float* x5203 = (float*)myMalloc(1 
* sizeof(float));; x5203[0] = 1.0f; float* x5205 = (float*)myMalloc(1 * sizeof(float));; x5205[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5203,x469,1,x5205, x1165, 1, x469,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1165, 0.0f, 256); float* x5209 = (float*)myMalloc(1 * sizeof(float));; x5209[0] = 1.0f; float* x5211 = (float*)myMalloc(1 * sizeof(float));; x5211[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5209,x472,1,x5211, x1166, 1, x472,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1166, 0.0f, 1024); float* x5215 = (float*)myMalloc(1 * sizeof(float));; x5215[0] = 1.0f; float* x5217 = (float*)myMalloc(1 * sizeof(float));; x5217[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5215,x475,1,x5217, x1167, 1, x475,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1167, 0.0f, 64); float* x5221 = (float*)myMalloc(1 * sizeof(float));; x5221[0] = 1.0f; float* x5223 = (float*)myMalloc(1 * sizeof(float));; x5223[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5221,x478,1,x5223, x1168, 1, x478,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1168, 0.0f, 128); float* x5227 = (float*)myMalloc(1 * sizeof(float));; x5227[0] = 1.0f; float* x5229 = (float*)myMalloc(1 * sizeof(float));; x5229[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5227,x481,1,x5229, x1169, 1, x481,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1169, 0.0f, 2048); float* x5233 = (float*)myMalloc(1 * sizeof(float));; x5233[0] = 1.0f; float* x5235 = (float*)myMalloc(1 * sizeof(float));; x5235[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5233,x484,1,x5235, x1170, 1, x484,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1170, 0.0f, 256); float* x5239 = (float*)myMalloc(1 * sizeof(float));; x5239[0] = 1.0f; float* x5241 = (float*)myMalloc(1 * sizeof(float));; x5241[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5239,x487,1,x5241, x1171, 1, x487,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1171, 0.0f, 2048); float* x5245 = (float*)myMalloc(1 * sizeof(float));; x5245[0] = 1.0f; float* x5247 = (float*)myMalloc(1 * sizeof(float));; x5247[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5245,x490,1,x5247, x1172, 1, x490,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1172, 0.0f, 512); float* x5251 = (float*)myMalloc(1 * sizeof(float));; x5251[0] = 1.0f; float* x5253 = (float*)myMalloc(1 * sizeof(float));; x5253[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5251,x493,1,x5253, x1173, 1, x493,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1173, 0.0f, 512); float* x5257 = (float*)myMalloc(1 * sizeof(float));; x5257[0] = 1.0f; float* x5259 = (float*)myMalloc(1 * sizeof(float));; x5259[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5257,x496,1,x5259, x1174, 1, x496,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1174, 0.0f, 512); float* x5263 = (float*)myMalloc(1 * sizeof(float));; x5263[0] = 1.0f; float* x5265 = (float*)myMalloc(1 * sizeof(float));; x5265[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5263,x499,1,x5265, x1175, 
1, x499,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1175, 0.0f, 2048); float* x5269 = (float*)myMalloc(1 * sizeof(float));; x5269[0] = 1.0f; float* x5271 = (float*)myMalloc(1 * sizeof(float));; x5271[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5269,x502,1,x5271, x1176, 1, x502,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1176, 0.0f, 256); float* x5275 = (float*)myMalloc(1 * sizeof(float));; x5275[0] = 1.0f; float* x5277 = (float*)myMalloc(1 * sizeof(float));; x5277[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5275,x505,1,x5277, x1177, 1, x505,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1177, 0.0f, 256); float* x5281 = (float*)myMalloc(1 * sizeof(float));; x5281[0] = 1.0f; float* x5283 = (float*)myMalloc(1 * sizeof(float));; x5283[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5281,x508,1,x5283, x1178, 1, x508,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1178, 0.0f, 256); float* x5287 = (float*)myMalloc(1 * sizeof(float));; x5287[0] = 1.0f; float* x5289 = (float*)myMalloc(1 * sizeof(float));; x5289[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5287,x511,1,x5289, x1179, 1, x511,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1179, 0.0f, 64); float* x5293 = (float*)myMalloc(1 * sizeof(float));; x5293[0] = 1.0f; float* x5295 = (float*)myMalloc(1 * sizeof(float));; x5295[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 576,64,x5293,x514,576,x5295, x1180, 576, x514,576));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1180, 0.0f, 36864); float* x5299 = (float*)myMalloc(1 * sizeof(float));; x5299[0] = 1.0f; float* x5301 = (float*)myMalloc(1 * sizeof(float));; x5301[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5299,x517,1,x5301, x1181, 1, x517,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1181, 0.0f, 256); float* x5305 = (float*)myMalloc(1 * sizeof(float));; x5305[0] = 1.0f; float* x5307 = (float*)myMalloc(1 * sizeof(float));; x5307[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,1024,x5305,x520,512,x5307, x1182, 512, x520,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1182, 0.0f, 524288); float* x5311 = (float*)myMalloc(1 * sizeof(float));; x5311[0] = 1.0f; float* x5313 = (float*)myMalloc(1 * sizeof(float));; x5313[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5311,x523,1,x5313, x1183, 1, x523,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1183, 0.0f, 256); float* x5317 = (float*)myMalloc(1 * sizeof(float));; x5317[0] = 1.0f; float* x5319 = (float*)myMalloc(1 * sizeof(float));; x5319[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5317,x526,1,x5319, x1184, 1, x526,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1184, 0.0f, 256); float* x5323 = (float*)myMalloc(1 * sizeof(float));; x5323[0] = 1.0f; float* x5325 = (float*)myMalloc(1 * sizeof(float));; x5325[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5323,x529,1,x5325, x1185, 1, x529,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1185, 0.0f, 512); float* x5329 = (float*)myMalloc(1 * sizeof(float));; x5329[0] = 1.0f; float* x5331 = (float*)myMalloc(1 
* sizeof(float));; x5331[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5329,x532,1,x5331, x1186, 1, x532,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1186, 0.0f, 128); float* x5335 = (float*)myMalloc(1 * sizeof(float));; x5335[0] = 1.0f; float* x5337 = (float*)myMalloc(1 * sizeof(float));; x5337[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5335,x535,1,x5337, x1187, 1, x535,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1187, 0.0f, 256); float* x5341 = (float*)myMalloc(1 * sizeof(float));; x5341[0] = 1.0f; float* x5343 = (float*)myMalloc(1 * sizeof(float));; x5343[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5341,x538,1,x5343, x1188, 1, x538,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1188, 0.0f, 64); float* x5347 = (float*)myMalloc(1 * sizeof(float));; x5347[0] = 1.0f; float* x5349 = (float*)myMalloc(1 * sizeof(float));; x5349[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5347,x541,1,x5349, x1189, 1, x541,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1189, 0.0f, 512); float* x5353 = (float*)myMalloc(1 * sizeof(float));; x5353[0] = 1.0f; float* x5355 = (float*)myMalloc(1 * sizeof(float));; x5355[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 576,64,x5353,x544,576,x5355, x1190, 576, x544,576));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1190, 0.0f, 36864); float* x5359 = (float*)myMalloc(1 * sizeof(float));; x5359[0] = 1.0f; float* x5361 = (float*)myMalloc(1 * sizeof(float));; x5361[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5359,x547,1,x5361, x1191, 1, x547,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1191, 0.0f, 128); float* x5365 = (float*)myMalloc(1 * sizeof(float));; x5365[0] = 1.0f; float* x5367 = (float*)myMalloc(1 * sizeof(float));; x5367[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5365,x550,1,x5367, x1192, 1, x550,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1192, 0.0f, 256); float* x5371 = (float*)myMalloc(1 * sizeof(float));; x5371[0] = 1.0f; float* x5373 = (float*)myMalloc(1 * sizeof(float));; x5373[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5371,x553,1,x5373, x1193, 1, x553,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1193, 0.0f, 1024); float* x5377 = (float*)myMalloc(1 * sizeof(float));; x5377[0] = 1.0f; float* x5379 = (float*)myMalloc(1 * sizeof(float));; x5379[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 64,256,x5377,x556,64,x5379, x1194, 64, x556,64));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1194, 0.0f, 16384); float* x5383 = (float*)myMalloc(1 * sizeof(float));; x5383[0] = 1.0f; float* x5385 = (float*)myMalloc(1 * sizeof(float));; x5385[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5383,x559,1,x5385, x1195, 1, x559,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1195, 0.0f, 512); float* x5389 = (float*)myMalloc(1 * sizeof(float));; x5389[0] = 1.0f; float* x5391 = (float*)myMalloc(1 * sizeof(float));; x5391[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,1024,x5389,x562,256,x5391, x1196, 256, x562,256));hipLaunchKernelGGL(( arrayFill), 
dim3(28), dim3(512), 0, 0, x1196, 0.0f, 262144); float* x5395 = (float*)myMalloc(1 * sizeof(float));; x5395[0] = 1.0f; float* x5397 = (float*)myMalloc(1 * sizeof(float));; x5397[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 576,64,x5395,x565,576,x5397, x1197, 576, x565,576));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1197, 0.0f, 36864); float* x5401 = (float*)myMalloc(1 * sizeof(float));; x5401[0] = 1.0f; float* x5403 = (float*)myMalloc(1 * sizeof(float));; x5403[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5401,x568,1,x5403, x1198, 1, x568,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1198, 0.0f, 256); float* x5407 = (float*)myMalloc(1 * sizeof(float));; x5407[0] = 1.0f; float* x5409 = (float*)myMalloc(1 * sizeof(float));; x5409[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5407,x571,1,x5409, x1199, 1, x571,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1199, 0.0f, 256); float* x5413 = (float*)myMalloc(1 * sizeof(float));; x5413[0] = 1.0f; float* x5415 = (float*)myMalloc(1 * sizeof(float));; x5415[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5413,x574,1,x5415, x1200, 1, x574,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1200, 0.0f, 1024); float* x5419 = (float*)myMalloc(1 * sizeof(float));; x5419[0] = 1.0f; float* x5421 = (float*)myMalloc(1 * sizeof(float));; x5421[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5419,x577,1,x5421, x1201, 1, x577,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1201, 0.0f, 2048); float* x5425 = (float*)myMalloc(1 * sizeof(float));; x5425[0] = 1.0f; float* x5427 = (float*)myMalloc(1 * sizeof(float));; x5427[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5425,x580,1,x5427, x1202, 1, x580,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1202, 0.0f, 128); float* x5431 = (float*)myMalloc(1 * sizeof(float));; x5431[0] = 1.0f; float* x5433 = (float*)myMalloc(1 * sizeof(float));; x5433[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5431,x583,1,x5433, x1203, 1, x583,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1203, 0.0f, 256); float* x5437 = (float*)myMalloc(1 * sizeof(float));; x5437[0] = 1.0f; float* x5439 = (float*)myMalloc(1 * sizeof(float));; x5439[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,1024,x5437,x586,256,x5439, x1204, 256, x586,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1204, 0.0f, 262144); float* x5443 = (float*)myMalloc(1 * sizeof(float));; x5443[0] = 1.0f; float* x5445 = (float*)myMalloc(1 * sizeof(float));; x5445[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5443,x589,1,x5445, x1205, 1, x589,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1205, 0.0f, 256); float* x5449 = (float*)myMalloc(1 * sizeof(float));; x5449[0] = 1.0f; float* x5451 = (float*)myMalloc(1 * sizeof(float));; x5451[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5449,x592,1,x5451, x1206, 1, x592,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1206, 0.0f, 256); float* x5455 = (float*)myMalloc(1 * sizeof(float));; x5455[0] = 1.0f; float* x5457 = (float*)myMalloc(1 * sizeof(float));; x5457[0] = 
-0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5455,x595,1,x5457, x1207, 1, x595,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1207, 0.0f, 128); float* x5461 = (float*)myMalloc(1 * sizeof(float));; x5461[0] = 1.0f; float* x5463 = (float*)myMalloc(1 * sizeof(float));; x5463[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5461,x598,1,x5463, x1208, 1, x598,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1208, 0.0f, 512); float* x5467 = (float*)myMalloc(1 * sizeof(float));; x5467[0] = 1.0f; float* x5469 = (float*)myMalloc(1 * sizeof(float));; x5469[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5467,x601,1,x5469, x1209, 1, x601,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1209, 0.0f, 64); float* x5473 = (float*)myMalloc(1 * sizeof(float));; x5473[0] = 1.0f; float* x5475 = (float*)myMalloc(1 * sizeof(float));; x5475[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5473,x604,1,x5475, x1210, 1, x604,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1210, 0.0f, 2048); float* x5479 = (float*)myMalloc(1 * sizeof(float));; x5479[0] = 1.0f; float* x5481 = (float*)myMalloc(1 * sizeof(float));; x5481[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5479,x607,1,x5481, x1211, 1, x607,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1211, 0.0f, 256); float* x5485 = (float*)myMalloc(1 * sizeof(float));; x5485[0] = 1.0f; float* x5487 = (float*)myMalloc(1 * sizeof(float));; x5487[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5485,x610,1,x5487, x1212, 1, x610,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1212, 0.0f, 64); float* x5491 = (float*)myMalloc(1 * sizeof(float));; x5491[0] = 1.0f; float* x5493 = (float*)myMalloc(1 * sizeof(float));; x5493[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 128,512,x5491,x613,128,x5493, x1213, 128, x613,128));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1213, 0.0f, 65536); float* x5497 = (float*)myMalloc(1 * sizeof(float));; x5497[0] = 1.0f; float* x5499 = (float*)myMalloc(1 * sizeof(float));; x5499[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5497,x616,1,x5499, x1214, 1, x616,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1214, 0.0f, 2048); float* x5503 = (float*)myMalloc(1 * sizeof(float));; x5503[0] = 1.0f; float* x5505 = (float*)myMalloc(1 * sizeof(float));; x5505[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5503,x619,1,x5505, x1215, 1, x619,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1215, 0.0f, 256); float* x5509 = (float*)myMalloc(1 * sizeof(float));; x5509[0] = 1.0f; float* x5511 = (float*)myMalloc(1 * sizeof(float));; x5511[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5509,x622,1,x5511, x1216, 1, x622,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1216, 0.0f, 256); float* x5515 = (float*)myMalloc(1 * sizeof(float));; x5515[0] = 1.0f; float* x5517 = (float*)myMalloc(1 * sizeof(float));; x5517[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5515,x625,1,x5517, x1217, 1, x625,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1217, 0.0f, 64); float* 
x5521 = (float*)myMalloc(1 * sizeof(float));; x5521[0] = 1.0f; float* x5523 = (float*)myMalloc(1 * sizeof(float));; x5523[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 128,512,x5521,x628,128,x5523, x1218, 128, x628,128));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1218, 0.0f, 65536); float* x5527 = (float*)myMalloc(1 * sizeof(float));; x5527[0] = 1.0f; float* x5529 = (float*)myMalloc(1 * sizeof(float));; x5529[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5527,x631,1,x5529, x1219, 1, x631,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1219, 0.0f, 128); float* x5533 = (float*)myMalloc(1 * sizeof(float));; x5533[0] = 1.0f; float* x5535 = (float*)myMalloc(1 * sizeof(float));; x5535[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5533,x634,1,x5535, x1220, 1, x634,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1220, 0.0f, 512); float* x5539 = (float*)myMalloc(1 * sizeof(float));; x5539[0] = 1.0f; float* x5541 = (float*)myMalloc(1 * sizeof(float));; x5541[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5539,x637,1,x5541, x1221, 1, x637,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1221, 0.0f, 64); float* x5545 = (float*)myMalloc(1 * sizeof(float));; x5545[0] = 1.0f; float* x5547 = (float*)myMalloc(1 * sizeof(float));; x5547[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5545,x640,1,x5547, x1222, 1, x640,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1222, 0.0f, 2048); float* x5551 = (float*)myMalloc(1 * sizeof(float));; x5551[0] = 1.0f; float* x5553 = (float*)myMalloc(1 * sizeof(float));; x5553[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,1024,x5551,x643,256,x5553, x1223, 256, x643,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1223, 0.0f, 262144); float* x5557 = (float*)myMalloc(1 * sizeof(float));; x5557[0] = 1.0f; float* x5559 = (float*)myMalloc(1 * sizeof(float));; x5559[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5557,x646,1,x5559, x1224, 1, x646,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1224, 0.0f, 1024); float* x5563 = (float*)myMalloc(1 * sizeof(float));; x5563[0] = 1.0f; float* x5565 = (float*)myMalloc(1 * sizeof(float));; x5565[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5563,x649,1,x5565, x1225, 1, x649,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1225, 0.0f, 64); float* x5569 = (float*)myMalloc(1 * sizeof(float));; x5569[0] = 1.0f; float* x5571 = (float*)myMalloc(1 * sizeof(float));; x5571[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5569,x652,1,x5571, x1226, 1, x652,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1226, 0.0f, 512); float* x5575 = (float*)myMalloc(1 * sizeof(float));; x5575[0] = 1.0f; float* x5577 = (float*)myMalloc(1 * sizeof(float));; x5577[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5575,x655,1,x5577, x1227, 1, x655,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1227, 0.0f, 1024); float* x5581 = (float*)myMalloc(1 * sizeof(float));; x5581[0] = 1.0f; float* x5583 = (float*)myMalloc(1 * sizeof(float));; x5583[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, 
HIPBLAS_OP_N, 1,512,x5581,x658,1,x5583, x1228, 1, x658,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1228, 0.0f, 512); float* x5587 = (float*)myMalloc(1 * sizeof(float));; x5587[0] = 1.0f; float* x5589 = (float*)myMalloc(1 * sizeof(float));; x5589[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5587,x661,1,x5589, x1229, 1, x661,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1229, 0.0f, 1024); float* x5593 = (float*)myMalloc(1 * sizeof(float));; x5593[0] = 1.0f; float* x5595 = (float*)myMalloc(1 * sizeof(float));; x5595[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5593,x664,1,x5595, x1230, 1, x664,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1230, 0.0f, 2048); float* x5599 = (float*)myMalloc(1 * sizeof(float));; x5599[0] = 1.0f; float* x5601 = (float*)myMalloc(1 * sizeof(float));; x5601[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5599,x667,1,x5601, x1231, 1, x667,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1231, 0.0f, 256); float* x5605 = (float*)myMalloc(1 * sizeof(float));; x5605[0] = 1.0f; float* x5607 = (float*)myMalloc(1 * sizeof(float));; x5607[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5605,x670,1,x5607, x1232, 1, x670,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1232, 0.0f, 2048); float* x5611 = (float*)myMalloc(1 * sizeof(float));; x5611[0] = 1.0f; float* x5613 = (float*)myMalloc(1 * sizeof(float));; x5613[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5611,x673,1,x5613, x1233, 1, x673,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1233, 0.0f, 256); float* x5617 = (float*)myMalloc(1 * sizeof(float));; x5617[0] = 1.0f; float* x5619 = (float*)myMalloc(1 * sizeof(float));; x5619[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5617,x676,1,x5619, x1234, 1, x676,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1234, 0.0f, 128); float* x5623 = (float*)myMalloc(1 * sizeof(float));; x5623[0] = 1.0f; float* x5625 = (float*)myMalloc(1 * sizeof(float));; x5625[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5623,x679,1,x5625, x1235, 1, x679,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1235, 0.0f, 128); float* x5629 = (float*)myMalloc(1 * sizeof(float));; x5629[0] = 1.0f; float* x5631 = (float*)myMalloc(1 * sizeof(float));; x5631[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5629,x682,1,x5631, x1236, 1, x682,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1236, 0.0f, 256); float* x5635 = (float*)myMalloc(1 * sizeof(float));; x5635[0] = 1.0f; float* x5637 = (float*)myMalloc(1 * sizeof(float));; x5637[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 64,256,x5635,x685,64,x5637, x1237, 64, x685,64));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1237, 0.0f, 16384); float* x5641 = (float*)myMalloc(1 * sizeof(float));; x5641[0] = 1.0f; float* x5643 = (float*)myMalloc(1 * sizeof(float));; x5643[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5641,x688,1,x5643, x1238, 1, x688,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1238, 0.0f, 256); float* x5647 = (float*)myMalloc(1 * sizeof(float));; x5647[0] = 
1.0f; float* x5649 = (float*)myMalloc(1 * sizeof(float));; x5649[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,128,x5647,x691,512,x5649, x1239, 512, x691,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1239, 0.0f, 65536); float* x5653 = (float*)myMalloc(1 * sizeof(float));; x5653[0] = 1.0f; float* x5655 = (float*)myMalloc(1 * sizeof(float));; x5655[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5653,x694,1,x5655, x1240, 1, x694,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1240, 0.0f, 256); float* x5659 = (float*)myMalloc(1 * sizeof(float));; x5659[0] = 1.0f; float* x5661 = (float*)myMalloc(1 * sizeof(float));; x5661[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5659,x697,1,x5661, x1241, 1, x697,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1241, 0.0f, 128); float* x5665 = (float*)myMalloc(1 * sizeof(float));; x5665[0] = 1.0f; float* x5667 = (float*)myMalloc(1 * sizeof(float));; x5667[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5665,x700,1,x5667, x1242, 1, x700,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1242, 0.0f, 64); float* x5671 = (float*)myMalloc(1 * sizeof(float));; x5671[0] = 1.0f; float* x5673 = (float*)myMalloc(1 * sizeof(float));; x5673[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5671,x703,1,x5673, x1243, 1, x703,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1243, 0.0f, 256); float* x5677 = (float*)myMalloc(1 * sizeof(float));; x5677[0] = 1.0f; float* x5679 = (float*)myMalloc(1 * sizeof(float));; x5679[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5677,x706,1,x5679, x1244, 1, x706,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1244, 0.0f, 512); float* x5683 = (float*)myMalloc(1 * sizeof(float));; x5683[0] = 1.0f; float* x5685 = (float*)myMalloc(1 * sizeof(float));; x5685[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5683,x709,1,x5685, x1245, 1, x709,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1245, 0.0f, 512); float* x5689 = (float*)myMalloc(1 * sizeof(float));; x5689[0] = 1.0f; float* x5691 = (float*)myMalloc(1 * sizeof(float));; x5691[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,512,x5689,x712,1024,x5691, x1246, 1024, x712,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1246, 0.0f, 524288); float* x5695 = (float*)myMalloc(1 * sizeof(float));; x5695[0] = 1.0f; float* x5697 = (float*)myMalloc(1 * sizeof(float));; x5697[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5695,x715,1,x5697, x1247, 1, x715,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1247, 0.0f, 1024); float* x5701 = (float*)myMalloc(1 * sizeof(float));; x5701[0] = 1.0f; float* x5703 = (float*)myMalloc(1 * sizeof(float));; x5703[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5701,x718,1,x5703, x1248, 1, x718,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1248, 0.0f, 256); float* x5707 = (float*)myMalloc(1 * sizeof(float));; x5707[0] = 1.0f; float* x5709 = (float*)myMalloc(1 * sizeof(float));; x5709[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5707,x721,1,x5709, x1249, 1, 
x721,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1249, 0.0f, 64); float* x5713 = (float*)myMalloc(1 * sizeof(float));; x5713[0] = 1.0f; float* x5715 = (float*)myMalloc(1 * sizeof(float));; x5715[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5713,x724,1,x5715, x1250, 1, x724,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1250, 0.0f, 1024); float* x5719 = (float*)myMalloc(1 * sizeof(float));; x5719[0] = 1.0f; float* x5721 = (float*)myMalloc(1 * sizeof(float));; x5721[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5719,x727,1,x5721, x1251, 1, x727,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1251, 0.0f, 2048); float* x5725 = (float*)myMalloc(1 * sizeof(float));; x5725[0] = 1.0f; float* x5727 = (float*)myMalloc(1 * sizeof(float));; x5727[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5725,x730,1,x5727, x1252, 1, x730,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1252, 0.0f, 512); float* x5731 = (float*)myMalloc(1 * sizeof(float));; x5731[0] = 1.0f; float* x5733 = (float*)myMalloc(1 * sizeof(float));; x5733[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5731,x733,1,x5733, x1253, 1, x733,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1253, 0.0f, 1024); float* x5737 = (float*)myMalloc(1 * sizeof(float));; x5737[0] = 1.0f; float* x5739 = (float*)myMalloc(1 * sizeof(float));; x5739[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5737,x736,1,x5739, x1254, 1, x736,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1254, 0.0f, 512); float* x5743 = (float*)myMalloc(1 * sizeof(float));; x5743[0] = 1.0f; float* x5745 = (float*)myMalloc(1 * sizeof(float));; x5745[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5743,x739,1,x5745, x1255, 1, x739,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1255, 0.0f, 128); float* x5749 = (float*)myMalloc(1 * sizeof(float));; x5749[0] = 1.0f; float* x5751 = (float*)myMalloc(1 * sizeof(float));; x5751[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5749,x742,1,x5751, x1256, 1, x742,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1256, 0.0f, 512); float* x5755 = (float*)myMalloc(1 * sizeof(float));; x5755[0] = 1.0f; float* x5757 = (float*)myMalloc(1 * sizeof(float));; x5757[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,64,x5755,x745,256,x5757, x1257, 256, x745,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1257, 0.0f, 16384); float* x5761 = (float*)myMalloc(1 * sizeof(float));; x5761[0] = 1.0f; float* x5763 = (float*)myMalloc(1 * sizeof(float));; x5763[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,256,x5761,x748,1024,x5763, x1258, 1024, x748,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1258, 0.0f, 262144); float* x5767 = (float*)myMalloc(1 * sizeof(float));; x5767[0] = 1.0f; float* x5769 = (float*)myMalloc(1 * sizeof(float));; x5769[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 27,64,x5767,x751,27,x5769, x1259, 27, x751,27));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1259, 0.0f, 1728); float* x5773 = (float*)myMalloc(1 * sizeof(float));; x5773[0] = 1.0f; float* x5775 = 
(float*)myMalloc(1 * sizeof(float));; x5775[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5773,x754,1,x5775, x1260, 1, x754,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1260, 0.0f, 64); float* x5779 = (float*)myMalloc(1 * sizeof(float));; x5779[0] = 1.0f; float* x5781 = (float*)myMalloc(1 * sizeof(float));; x5781[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5779,x757,1,x5781, x1261, 1, x757,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1261, 0.0f, 512); float* x5785 = (float*)myMalloc(1 * sizeof(float));; x5785[0] = 1.0f; float* x5787 = (float*)myMalloc(1 * sizeof(float));; x5787[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4608,512,x5785,x760,4608,x5787, x1262, 4608, x760,4608));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1262, 0.0f, 2359296); float* x5791 = (float*)myMalloc(1 * sizeof(float));; x5791[0] = 1.0f; float* x5793 = (float*)myMalloc(1 * sizeof(float));; x5793[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5791,x763,1,x5793, x1263, 1, x763,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1263, 0.0f, 512); float* x5797 = (float*)myMalloc(1 * sizeof(float));; x5797[0] = 1.0f; float* x5799 = (float*)myMalloc(1 * sizeof(float));; x5799[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5797,x766,1,x5799, x1264, 1, x766,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1264, 0.0f, 256); float* x5803 = (float*)myMalloc(1 * sizeof(float));; x5803[0] = 1.0f; float* x5805 = (float*)myMalloc(1 * sizeof(float));; x5805[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5803,x769,1,x5805, x1265, 1, x769,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1265, 0.0f, 64); float* x5809 = (float*)myMalloc(1 * sizeof(float));; x5809[0] = 1.0f; float* x5811 = (float*)myMalloc(1 * sizeof(float));; x5811[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5809,x772,1,x5811, x1266, 1, x772,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1266, 0.0f, 512); float* x5815 = (float*)myMalloc(1 * sizeof(float));; x5815[0] = 1.0f; float* x5817 = (float*)myMalloc(1 * sizeof(float));; x5817[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5815,x775,1,x5817, x1267, 1, x775,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1267, 0.0f, 512); float* x5821 = (float*)myMalloc(1 * sizeof(float));; x5821[0] = 1.0f; float* x5823 = (float*)myMalloc(1 * sizeof(float));; x5823[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5821,x778,1,x5823, x1268, 1, x778,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1268, 0.0f, 1024); float* x5827 = (float*)myMalloc(1 * sizeof(float));; x5827[0] = 1.0f; float* x5829 = (float*)myMalloc(1 * sizeof(float));; x5829[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 64,256,x5827,x781,64,x5829, x1269, 64, x781,64));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1269, 0.0f, 16384); float* x5833 = (float*)myMalloc(1 * sizeof(float));; x5833[0] = 1.0f; float* x5835 = (float*)myMalloc(1 * sizeof(float));; x5835[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5833,x784,1,x5835, x1270, 1, x784,1));hipLaunchKernelGGL(( 
arrayFill), dim3(28), dim3(512), 0, 0, x1270, 0.0f, 256); float* x5839 = (float*)myMalloc(1 * sizeof(float));; x5839[0] = 1.0f; float* x5841 = (float*)myMalloc(1 * sizeof(float));; x5841[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5839,x787,1,x5841, x1271, 1, x787,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1271, 0.0f, 64); float* x5845 = (float*)myMalloc(1 * sizeof(float));; x5845[0] = 1.0f; float* x5847 = (float*)myMalloc(1 * sizeof(float));; x5847[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1152,128,x5845,x790,1152,x5847, x1272, 1152, x790,1152));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1272, 0.0f, 147456); float* x5851 = (float*)myMalloc(1 * sizeof(float));; x5851[0] = 1.0f; float* x5853 = (float*)myMalloc(1 * sizeof(float));; x5853[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5851,x793,1,x5853, x1273, 1, x793,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1273, 0.0f, 256); float* x5857 = (float*)myMalloc(1 * sizeof(float));; x5857[0] = 1.0f; float* x5859 = (float*)myMalloc(1 * sizeof(float));; x5859[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5857,x796,1,x5859, x1274, 1, x796,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1274, 0.0f, 512); float* x5863 = (float*)myMalloc(1 * sizeof(float));; x5863[0] = 1.0f; float* x5865 = (float*)myMalloc(1 * sizeof(float));; x5865[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5863,x799,1,x5865, x1275, 1, x799,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1275, 0.0f, 256); float* x5869 = (float*)myMalloc(1 * sizeof(float));; x5869[0] = 1.0f; float* x5871 = (float*)myMalloc(1 * sizeof(float));; x5871[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x5869,x802,1,x5871, x1276, 1, x802,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1276, 0.0f, 512); float* x5875 = (float*)myMalloc(1 * sizeof(float));; x5875[0] = 1.0f; float* x5877 = (float*)myMalloc(1 * sizeof(float));; x5877[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5875,x805,1,x5877, x1277, 1, x805,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1277, 0.0f, 128); float* x5881 = (float*)myMalloc(1 * sizeof(float));; x5881[0] = 1.0f; float* x5883 = (float*)myMalloc(1 * sizeof(float));; x5883[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,64,x5881,x808,256,x5883, x1278, 256, x808,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1278, 0.0f, 16384); float* x5887 = (float*)myMalloc(1 * sizeof(float));; x5887[0] = 1.0f; float* x5889 = (float*)myMalloc(1 * sizeof(float));; x5889[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5887,x811,1,x5889, x1279, 1, x811,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1279, 0.0f, 128); float* x5893 = (float*)myMalloc(1 * sizeof(float));; x5893[0] = 1.0f; float* x5895 = (float*)myMalloc(1 * sizeof(float));; x5895[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5893,x814,1,x5895, x1280, 1, x814,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1280, 0.0f, 2048); float* x5899 = (float*)myMalloc(1 * sizeof(float));; x5899[0] = 1.0f; float* x5901 = (float*)myMalloc(1 * sizeof(float));; x5901[0] 
= -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5899,x817,1,x5901, x1281, 1, x817,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1281, 0.0f, 256); float* x5905 = (float*)myMalloc(1 * sizeof(float));; x5905[0] = 1.0f; float* x5907 = (float*)myMalloc(1 * sizeof(float));; x5907[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2304,256,x5905,x820,2304,x5907, x1282, 2304, x820,2304));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1282, 0.0f, 589824); float* x5911 = (float*)myMalloc(1 * sizeof(float));; x5911[0] = 1.0f; float* x5913 = (float*)myMalloc(1 * sizeof(float));; x5913[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5911,x823,1,x5913, x1283, 1, x823,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1283, 0.0f, 256); float* x5917 = (float*)myMalloc(1 * sizeof(float));; x5917[0] = 1.0f; float* x5919 = (float*)myMalloc(1 * sizeof(float));; x5919[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5917,x826,1,x5919, x1284, 1, x826,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1284, 0.0f, 128); float* x5923 = (float*)myMalloc(1 * sizeof(float));; x5923[0] = 1.0f; float* x5925 = (float*)myMalloc(1 * sizeof(float));; x5925[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5923,x829,1,x5925, x1285, 1, x829,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1285, 0.0f, 256); float* x5929 = (float*)myMalloc(1 * sizeof(float));; x5929[0] = 1.0f; float* x5931 = (float*)myMalloc(1 * sizeof(float));; x5931[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5929,x832,1,x5931, x1286, 1, x832,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1286, 0.0f, 64); float* x5935 = (float*)myMalloc(1 * sizeof(float));; x5935[0] = 1.0f; float* x5937 = (float*)myMalloc(1 * sizeof(float));; x5937[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,256,x5935,x835,512,x5937, x1287, 512, x835,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1287, 0.0f, 131072); float* x5941 = (float*)myMalloc(1 * sizeof(float));; x5941[0] = 1.0f; float* x5943 = (float*)myMalloc(1 * sizeof(float));; x5943[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x5941,x838,1,x5943, x1288, 1, x838,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1288, 0.0f, 2048); float* x5947 = (float*)myMalloc(1 * sizeof(float));; x5947[0] = 1.0f; float* x5949 = (float*)myMalloc(1 * sizeof(float));; x5949[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5947,x841,1,x5949, x1289, 1, x841,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1289, 0.0f, 1024); float* x5953 = (float*)myMalloc(1 * sizeof(float));; x5953[0] = 1.0f; float* x5955 = (float*)myMalloc(1 * sizeof(float));; x5955[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5953,x844,1,x5955, x1290, 1, x844,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1290, 0.0f, 1024); float* x5959 = (float*)myMalloc(1 * sizeof(float));; x5959[0] = 1.0f; float* x5961 = (float*)myMalloc(1 * sizeof(float));; x5961[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5959,x847,1,x5961, x1291, 1, x847,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, 
x1291, 0.0f, 256); float* x5965 = (float*)myMalloc(1 * sizeof(float));; x5965[0] = 1.0f; float* x5967 = (float*)myMalloc(1 * sizeof(float));; x5967[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5965,x850,1,x5967, x1292, 1, x850,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1292, 0.0f, 256); float* x5971 = (float*)myMalloc(1 * sizeof(float));; x5971[0] = 1.0f; float* x5973 = (float*)myMalloc(1 * sizeof(float));; x5973[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5971,x853,1,x5973, x1293, 1, x853,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1293, 0.0f, 256); float* x5977 = (float*)myMalloc(1 * sizeof(float));; x5977[0] = 1.0f; float* x5979 = (float*)myMalloc(1 * sizeof(float));; x5979[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x5977,x856,1,x5979, x1294, 1, x856,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1294, 0.0f, 64); float* x5983 = (float*)myMalloc(1 * sizeof(float));; x5983[0] = 1.0f; float* x5985 = (float*)myMalloc(1 * sizeof(float));; x5985[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x5983,x859,1,x5985, x1295, 1, x859,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1295, 0.0f, 1024); float* x5989 = (float*)myMalloc(1 * sizeof(float));; x5989[0] = 1.0f; float* x5991 = (float*)myMalloc(1 * sizeof(float));; x5991[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x5989,x862,1,x5991, x1296, 1, x862,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1296, 0.0f, 256); float* x5995 = (float*)myMalloc(1 * sizeof(float));; x5995[0] = 1.0f; float* x5997 = (float*)myMalloc(1 * sizeof(float));; x5997[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x5995,x865,1,x5997, x1297, 1, x865,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1297, 0.0f, 128); float* x6001 = (float*)myMalloc(1 * sizeof(float));; x6001[0] = 1.0f; float* x6003 = (float*)myMalloc(1 * sizeof(float));; x6003[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1152,128,x6001,x868,1152,x6003, x1298, 1152, x868,1152));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1298, 0.0f, 147456); float* x6007 = (float*)myMalloc(1 * sizeof(float));; x6007[0] = 1.0f; float* x6009 = (float*)myMalloc(1 * sizeof(float));; x6009[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6007,x871,1,x6009, x1299, 1, x871,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1299, 0.0f, 256); float* x6013 = (float*)myMalloc(1 * sizeof(float));; x6013[0] = 1.0f; float* x6015 = (float*)myMalloc(1 * sizeof(float));; x6015[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x6013,x874,1,x6015, x1300, 1, x874,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1300, 0.0f, 2048); float* x6019 = (float*)myMalloc(1 * sizeof(float));; x6019[0] = 1.0f; float* x6021 = (float*)myMalloc(1 * sizeof(float));; x6021[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6019,x877,1,x6021, x1301, 1, x877,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1301, 0.0f, 512); float* x6025 = (float*)myMalloc(1 * sizeof(float));; x6025[0] = 1.0f; float* x6027 = (float*)myMalloc(1 * sizeof(float));; x6027[0] = -0.005f; 
CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6025,x880,1,x6027, x1302, 1, x880,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1302, 0.0f, 512); float* x6031 = (float*)myMalloc(1 * sizeof(float));; x6031[0] = 1.0f; float* x6033 = (float*)myMalloc(1 * sizeof(float));; x6033[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,128,x6031,x883,512,x6033, x1303, 512, x883,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1303, 0.0f, 65536); float* x6037 = (float*)myMalloc(1 * sizeof(float));; x6037[0] = 1.0f; float* x6039 = (float*)myMalloc(1 * sizeof(float));; x6039[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6037,x886,1,x6039, x1304, 1, x886,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1304, 0.0f, 256); float* x6043 = (float*)myMalloc(1 * sizeof(float));; x6043[0] = 1.0f; float* x6045 = (float*)myMalloc(1 * sizeof(float));; x6045[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6043,x889,1,x6045, x1305, 1, x889,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1305, 0.0f, 256); float* x6049 = (float*)myMalloc(1 * sizeof(float));; x6049[0] = 1.0f; float* x6051 = (float*)myMalloc(1 * sizeof(float));; x6051[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6049,x892,1,x6051, x1306, 1, x892,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1306, 0.0f, 256); float* x6055 = (float*)myMalloc(1 * sizeof(float));; x6055[0] = 1.0f; float* x6057 = (float*)myMalloc(1 * sizeof(float));; x6057[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6055,x895,1,x6057, x1307, 1, x895,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1307, 0.0f, 256); float* x6061 = (float*)myMalloc(1 * sizeof(float));; x6061[0] = 1.0f; float* x6063 = (float*)myMalloc(1 * sizeof(float));; x6063[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6061,x898,1,x6063, x1308, 1, x898,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1308, 0.0f, 512); float* x6067 = (float*)myMalloc(1 * sizeof(float));; x6067[0] = 1.0f; float* x6069 = (float*)myMalloc(1 * sizeof(float));; x6069[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6067,x901,1,x6069, x1309, 1, x901,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1309, 0.0f, 512); float* x6073 = (float*)myMalloc(1 * sizeof(float));; x6073[0] = 1.0f; float* x6075 = (float*)myMalloc(1 * sizeof(float));; x6075[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6073,x904,1,x6075, x1310, 1, x904,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1310, 0.0f, 256); float* x6079 = (float*)myMalloc(1 * sizeof(float));; x6079[0] = 1.0f; float* x6081 = (float*)myMalloc(1 * sizeof(float));; x6081[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6079,x907,1,x6081, x1311, 1, x907,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1311, 0.0f, 128); float* x6085 = (float*)myMalloc(1 * sizeof(float));; x6085[0] = 1.0f; float* x6087 = (float*)myMalloc(1 * sizeof(float));; x6087[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6085,x910,1,x6087, x1312, 1, x910,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1312, 0.0f, 512); float* x6091 
= (float*)myMalloc(1 * sizeof(float));; x6091[0] = 1.0f; float* x6093 = (float*)myMalloc(1 * sizeof(float));; x6093[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x6091,x913,1,x6093, x1313, 1, x913,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1313, 0.0f, 64); float* x6097 = (float*)myMalloc(1 * sizeof(float));; x6097[0] = 1.0f; float* x6099 = (float*)myMalloc(1 * sizeof(float));; x6099[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6097,x916,1,x6099, x1314, 1, x916,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1314, 0.0f, 512); float* x6103 = (float*)myMalloc(1 * sizeof(float));; x6103[0] = 1.0f; float* x6105 = (float*)myMalloc(1 * sizeof(float));; x6105[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x6103,x919,1,x6105, x1315, 1, x919,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1315, 0.0f, 64); float* x6109 = (float*)myMalloc(1 * sizeof(float));; x6109[0] = 1.0f; float* x6111 = (float*)myMalloc(1 * sizeof(float));; x6111[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6109,x922,1,x6111, x1316, 1, x922,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1316, 0.0f, 1024); float* x6115 = (float*)myMalloc(1 * sizeof(float));; x6115[0] = 1.0f; float* x6117 = (float*)myMalloc(1 * sizeof(float));; x6117[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6115,x925,1,x6117, x1317, 1, x925,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1317, 0.0f, 512); float* x6121 = (float*)myMalloc(1 * sizeof(float));; x6121[0] = 1.0f; float* x6123 = (float*)myMalloc(1 * sizeof(float));; x6123[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6121,x928,1,x6123, x1318, 1, x928,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1318, 0.0f, 1024); float* x6127 = (float*)myMalloc(1 * sizeof(float));; x6127[0] = 1.0f; float* x6129 = (float*)myMalloc(1 * sizeof(float));; x6129[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,2048,x6127,x931,512,x6129, x1319, 512, x931,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1319, 0.0f, 1048576); float* x6133 = (float*)myMalloc(1 * sizeof(float));; x6133[0] = 1.0f; float* x6135 = (float*)myMalloc(1 * sizeof(float));; x6135[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6133,x934,1,x6135, x1320, 1, x934,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1320, 0.0f, 512); float* x6139 = (float*)myMalloc(1 * sizeof(float));; x6139[0] = 1.0f; float* x6141 = (float*)myMalloc(1 * sizeof(float));; x6141[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,2048,x6139,x937,1024,x6141, x1321, 1024, x937,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1321, 0.0f, 2097152); float* x6145 = (float*)myMalloc(1 * sizeof(float));; x6145[0] = 1.0f; float* x6147 = (float*)myMalloc(1 * sizeof(float));; x6147[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2048,512,x6145,x940,2048,x6147, x1322, 2048, x940,2048));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1322, 0.0f, 1048576); float* x6151 = (float*)myMalloc(1 * sizeof(float));; x6151[0] = 1.0f; float* x6153 = (float*)myMalloc(1 * sizeof(float));; x6153[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, 
HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6151,x943,1,x6153, x1323, 1, x943,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1323, 0.0f, 1024); float* x6157 = (float*)myMalloc(1 * sizeof(float));; x6157[0] = 1.0f; float* x6159 = (float*)myMalloc(1 * sizeof(float));; x6159[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6157,x946,1,x6159, x1324, 1, x946,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1324, 0.0f, 128); float* x6163 = (float*)myMalloc(1 * sizeof(float));; x6163[0] = 1.0f; float* x6165 = (float*)myMalloc(1 * sizeof(float));; x6165[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1024,256,x6163,x949,1024,x6165, x1325, 1024, x949,1024));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1325, 0.0f, 262144); float* x6169 = (float*)myMalloc(1 * sizeof(float));; x6169[0] = 1.0f; float* x6171 = (float*)myMalloc(1 * sizeof(float));; x6171[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6169,x952,1,x6171, x1326, 1, x952,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1326, 0.0f, 256); float* x6175 = (float*)myMalloc(1 * sizeof(float));; x6175[0] = 1.0f; float* x6177 = (float*)myMalloc(1 * sizeof(float));; x6177[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6175,x955,1,x6177, x1327, 1, x955,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1327, 0.0f, 1024); float* x6181 = (float*)myMalloc(1 * sizeof(float));; x6181[0] = 1.0f; float* x6183 = (float*)myMalloc(1 * sizeof(float));; x6183[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,1024,x6181,x958,256,x6183, x1328, 256, x958,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1328, 0.0f, 262144); float* x6187 = (float*)myMalloc(1 * sizeof(float));; x6187[0] = 1.0f; float* x6189 = (float*)myMalloc(1 * sizeof(float));; x6189[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6187,x961,1,x6189, x1329, 1, x961,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1329, 0.0f, 128); float* x6193 = (float*)myMalloc(1 * sizeof(float));; x6193[0] = 1.0f; float* x6195 = (float*)myMalloc(1 * sizeof(float));; x6195[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6193,x964,1,x6195, x1330, 1, x964,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1330, 0.0f, 512); float* x6199 = (float*)myMalloc(1 * sizeof(float));; x6199[0] = 1.0f; float* x6201 = (float*)myMalloc(1 * sizeof(float));; x6201[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6199,x967,1,x6201, x1331, 1, x967,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1331, 0.0f, 512); float* x6205 = (float*)myMalloc(1 * sizeof(float));; x6205[0] = 1.0f; float* x6207 = (float*)myMalloc(1 * sizeof(float));; x6207[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6205,x970,1,x6207, x1332, 1, x970,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1332, 0.0f, 128); float* x6211 = (float*)myMalloc(1 * sizeof(float));; x6211[0] = 1.0f; float* x6213 = (float*)myMalloc(1 * sizeof(float));; x6213[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2304,256,x6211,x973,2304,x6213, x1333, 2304, x973,2304));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1333, 0.0f, 589824); float* x6217 = 
(float*)myMalloc(1 * sizeof(float));; x6217[0] = 1.0f; float* x6219 = (float*)myMalloc(1 * sizeof(float));; x6219[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2048,10,x6217,x976,2048,x6219, x1334, 2048, x976,2048));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1334, 0.0f, 20480); float* x6223 = (float*)myMalloc(1 * sizeof(float));; x6223[0] = 1.0f; float* x6225 = (float*)myMalloc(1 * sizeof(float));; x6225[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6223,x979,1,x6225, x1335, 1, x979,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1335, 0.0f, 256); float* x6229 = (float*)myMalloc(1 * sizeof(float));; x6229[0] = 1.0f; float* x6231 = (float*)myMalloc(1 * sizeof(float));; x6231[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6229,x982,1,x6231, x1336, 1, x982,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1336, 0.0f, 256); float* x6235 = (float*)myMalloc(1 * sizeof(float));; x6235[0] = 1.0f; float* x6237 = (float*)myMalloc(1 * sizeof(float));; x6237[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6235,x985,1,x6237, x1337, 1, x985,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1337, 0.0f, 256); float* x6241 = (float*)myMalloc(1 * sizeof(float));; x6241[0] = 1.0f; float* x6243 = (float*)myMalloc(1 * sizeof(float));; x6243[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6241,x988,1,x6243, x1338, 1, x988,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1338, 0.0f, 1024); float* x6247 = (float*)myMalloc(1 * sizeof(float));; x6247[0] = 1.0f; float* x6249 = (float*)myMalloc(1 * sizeof(float));; x6249[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6247,x991,1,x6249, x1339, 1, x991,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1339, 0.0f, 1024); float* x6253 = (float*)myMalloc(1 * sizeof(float));; x6253[0] = 1.0f; float* x6255 = (float*)myMalloc(1 * sizeof(float));; x6255[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 64,64,x6253,x994,64,x6255, x1340, 64, x994,64));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1340, 0.0f, 4096); float* x6259 = (float*)myMalloc(1 * sizeof(float));; x6259[0] = 1.0f; float* x6261 = (float*)myMalloc(1 * sizeof(float));; x6261[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6259,x997,1,x6261, x1341, 1, x997,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1341, 0.0f, 512); float* x6265 = (float*)myMalloc(1 * sizeof(float));; x6265[0] = 1.0f; float* x6267 = (float*)myMalloc(1 * sizeof(float));; x6267[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1152,128,x6265,x1000,1152,x6267, x1342, 1152, x1000,1152));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1342, 0.0f, 147456); float* x6271 = (float*)myMalloc(1 * sizeof(float));; x6271[0] = 1.0f; float* x6273 = (float*)myMalloc(1 * sizeof(float));; x6273[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6271,x1003,1,x6273, x1343, 1, x1003,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1343, 0.0f, 128); float* x6277 = (float*)myMalloc(1 * sizeof(float));; x6277[0] = 1.0f; float* x6279 = (float*)myMalloc(1 * sizeof(float));; x6279[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, 
HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6277,x1006,1,x6279, x1344, 1, x1006,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1344, 0.0f, 256); float* x6283 = (float*)myMalloc(1 * sizeof(float));; x6283[0] = 1.0f; float* x6285 = (float*)myMalloc(1 * sizeof(float));; x6285[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6283,x1009,1,x6285, x1345, 1, x1009,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1345, 0.0f, 1024); float* x6289 = (float*)myMalloc(1 * sizeof(float));; x6289[0] = 1.0f; float* x6291 = (float*)myMalloc(1 * sizeof(float));; x6291[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x6289,x1012,1,x6291, x1346, 1, x1012,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1346, 0.0f, 2048); float* x6295 = (float*)myMalloc(1 * sizeof(float));; x6295[0] = 1.0f; float* x6297 = (float*)myMalloc(1 * sizeof(float));; x6297[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6295,x1015,1,x6297, x1347, 1, x1015,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1347, 0.0f, 256); float* x6301 = (float*)myMalloc(1 * sizeof(float));; x6301[0] = 1.0f; float* x6303 = (float*)myMalloc(1 * sizeof(float));; x6303[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6301,x1018,1,x6303, x1348, 1, x1018,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1348, 0.0f, 256); float* x6307 = (float*)myMalloc(1 * sizeof(float));; x6307[0] = 1.0f; float* x6309 = (float*)myMalloc(1 * sizeof(float));; x6309[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6307,x1021,1,x6309, x1349, 1, x1021,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1349, 0.0f, 128); float* x6313 = (float*)myMalloc(1 * sizeof(float));; x6313[0] = 1.0f; float* x6315 = (float*)myMalloc(1 * sizeof(float));; x6315[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6313,x1024,1,x6315, x1350, 1, x1024,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1350, 0.0f, 256); float* x6319 = (float*)myMalloc(1 * sizeof(float));; x6319[0] = 1.0f; float* x6321 = (float*)myMalloc(1 * sizeof(float));; x6321[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x6319,x1027,1,x6321, x1351, 1, x1027,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1351, 0.0f, 64); float* x6325 = (float*)myMalloc(1 * sizeof(float));; x6325[0] = 1.0f; float* x6327 = (float*)myMalloc(1 * sizeof(float));; x6327[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x6325,x1030,1,x6327, x1352, 1, x1030,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1352, 0.0f, 2048); float* x6331 = (float*)myMalloc(1 * sizeof(float));; x6331[0] = 1.0f; float* x6333 = (float*)myMalloc(1 * sizeof(float));; x6333[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6331,x1033,1,x6333, x1353, 1, x1033,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1353, 0.0f, 512); float* x6337 = (float*)myMalloc(1 * sizeof(float));; x6337[0] = 1.0f; float* x6339 = (float*)myMalloc(1 * sizeof(float));; x6339[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6337,x1036,1,x6339, x1354, 1, x1036,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1354, 0.0f, 256); float* x6343 = (float*)myMalloc(1 * 
sizeof(float));; x6343[0] = 1.0f; float* x6345 = (float*)myMalloc(1 * sizeof(float));; x6345[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6343,x1039,1,x6345, x1355, 1, x1039,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1355, 0.0f, 1024); float* x6349 = (float*)myMalloc(1 * sizeof(float));; x6349[0] = 1.0f; float* x6351 = (float*)myMalloc(1 * sizeof(float));; x6351[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2304,256,x6349,x1042,2304,x6351, x1356, 2304, x1042,2304));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1356, 0.0f, 589824); float* x6355 = (float*)myMalloc(1 * sizeof(float));; x6355[0] = 1.0f; float* x6357 = (float*)myMalloc(1 * sizeof(float));; x6357[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6355,x1045,1,x6357, x1357, 1, x1045,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1357, 0.0f, 256); float* x6361 = (float*)myMalloc(1 * sizeof(float));; x6361[0] = 1.0f; float* x6363 = (float*)myMalloc(1 * sizeof(float));; x6363[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x6361,x1048,1,x6363, x1358, 1, x1048,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1358, 0.0f, 64); float* x6367 = (float*)myMalloc(1 * sizeof(float));; x6367[0] = 1.0f; float* x6369 = (float*)myMalloc(1 * sizeof(float));; x6369[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6367,x1051,1,x6369, x1359, 1, x1051,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1359, 0.0f, 128); float* x6373 = (float*)myMalloc(1 * sizeof(float));; x6373[0] = 1.0f; float* x6375 = (float*)myMalloc(1 * sizeof(float));; x6375[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6373,x1054,1,x6375, x1360, 1, x1054,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1360, 0.0f, 256); float* x6379 = (float*)myMalloc(1 * sizeof(float));; x6379[0] = 1.0f; float* x6381 = (float*)myMalloc(1 * sizeof(float));; x6381[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6379,x1057,1,x6381, x1361, 1, x1057,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1361, 0.0f, 256); float* x6385 = (float*)myMalloc(1 * sizeof(float));; x6385[0] = 1.0f; float* x6387 = (float*)myMalloc(1 * sizeof(float));; x6387[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,512,x6385,x1060,1,x6387, x1362, 1, x1060,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1362, 0.0f, 512); float* x6391 = (float*)myMalloc(1 * sizeof(float));; x6391[0] = 1.0f; float* x6393 = (float*)myMalloc(1 * sizeof(float));; x6393[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 512,128,x6391,x1063,512,x6393, x1363, 512, x1063,512));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1363, 0.0f, 65536); float* x6397 = (float*)myMalloc(1 * sizeof(float));; x6397[0] = 1.0f; float* x6399 = (float*)myMalloc(1 * sizeof(float));; x6399[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,64,x6397,x1066,1,x6399, x1364, 1, x1066,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1364, 0.0f, 64); float* x6403 = (float*)myMalloc(1 * sizeof(float));; x6403[0] = 1.0f; float* x6405 = (float*)myMalloc(1 * sizeof(float));; x6405[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, 
HIPBLAS_OP_N, 256,512,x6403,x1069,256,x6405, x1365, 256, x1069,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1365, 0.0f, 131072); float* x6409 = (float*)myMalloc(1 * sizeof(float));; x6409[0] = 1.0f; float* x6411 = (float*)myMalloc(1 * sizeof(float));; x6411[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6409,x1072,1,x6411, x1366, 1, x1072,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1366, 0.0f, 256); float* x6415 = (float*)myMalloc(1 * sizeof(float));; x6415[0] = 1.0f; float* x6417 = (float*)myMalloc(1 * sizeof(float));; x6417[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,2048,x6415,x1075,1,x6417, x1367, 1, x1075,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1367, 0.0f, 2048); float* x6421 = (float*)myMalloc(1 * sizeof(float));; x6421[0] = 1.0f; float* x6423 = (float*)myMalloc(1 * sizeof(float));; x6423[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6421,x1078,1,x6423, x1368, 1, x1078,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1368, 0.0f, 128); float* x6427 = (float*)myMalloc(1 * sizeof(float));; x6427[0] = 1.0f; float* x6429 = (float*)myMalloc(1 * sizeof(float));; x6429[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2304,256,x6427,x1081,2304,x6429, x1369, 2304, x1081,2304));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1369, 0.0f, 589824); float* x6433 = (float*)myMalloc(1 * sizeof(float));; x6433[0] = 1.0f; float* x6435 = (float*)myMalloc(1 * sizeof(float));; x6435[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6433,x1084,1,x6435, x1370, 1, x1084,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1370, 0.0f, 1024); float* x6439 = (float*)myMalloc(1 * sizeof(float));; x6439[0] = 1.0f; float* x6441 = (float*)myMalloc(1 * sizeof(float));; x6441[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6439,x1087,1,x6441, x1371, 1, x1087,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1371, 0.0f, 256); float* x6445 = (float*)myMalloc(1 * sizeof(float));; x6445[0] = 1.0f; float* x6447 = (float*)myMalloc(1 * sizeof(float));; x6447[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 2048,512,x6445,x1090,2048,x6447, x1372, 2048, x1090,2048));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1372, 0.0f, 1048576); float* x6451 = (float*)myMalloc(1 * sizeof(float));; x6451[0] = 1.0f; float* x6453 = (float*)myMalloc(1 * sizeof(float));; x6453[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6451,x1093,1,x6453, x1373, 1, x1093,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1373, 0.0f, 128); float* x6457 = (float*)myMalloc(1 * sizeof(float));; x6457[0] = 1.0f; float* x6459 = (float*)myMalloc(1 * sizeof(float));; x6459[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6457,x1096,1,x6459, x1374, 1, x1096,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1374, 0.0f, 1024); float* x6463 = (float*)myMalloc(1 * sizeof(float));; x6463[0] = 1.0f; float* x6465 = (float*)myMalloc(1 * sizeof(float));; x6465[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,128,x6463,x1099,1,x6465, x1375, 1, x1099,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1375, 0.0f, 128); float* 
x6469 = (float*)myMalloc(1 * sizeof(float));; x6469[0] = 1.0f; float* x6471 = (float*)myMalloc(1 * sizeof(float));; x6471[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 256,1024,x6469,x1102,256,x6471, x1376, 256, x1102,256));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1376, 0.0f, 262144); float* x6475 = (float*)myMalloc(1 * sizeof(float));; x6475[0] = 1.0f; float* x6477 = (float*)myMalloc(1 * sizeof(float));; x6477[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6475,x1105,1,x6477, x1377, 1, x1105,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1377, 0.0f, 256); float* x6481 = (float*)myMalloc(1 * sizeof(float));; x6481[0] = 1.0f; float* x6483 = (float*)myMalloc(1 * sizeof(float));; x6483[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,256,x6481,x1108,1,x6483, x1378, 1, x1108,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1378, 0.0f, 256); float* x6487 = (float*)myMalloc(1 * sizeof(float));; x6487[0] = 1.0f; float* x6489 = (float*)myMalloc(1 * sizeof(float));; x6489[0] = -0.005f; CUBLAS_CALL(hipblasSgeam(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, 1,1024,x6487,x1111,1,x6489, x1379, 1, x1111,1));hipLaunchKernelGGL(( arrayFill), dim3(28), dim3(512), 0, 0, x1379, 0.0f, 1024); int32_t x6493 = x1396 + 1; int32_t x6495 = x6493 % x6494; bool x6496 = x6495 == 0; if (x6496) { float x6501 = x1390; double x6497 = (double)x1397; double x6498 = 100.0 * x6497; double x6500 = x6498 / x6499; float x6502 = (float)x1396; float x6503 = x6501 / x6502; printf("Train epoch %d: [%d/%d (%.0f%%)] Average Loss: %.6f\n",x1386,x1397,x11,x6500,x6503); fflush(stdout); } else { } int64_t x6508 = (long)mallocAddr; int64_t x6509 = x6508 - x1382; memset((void*)x1382, 0, x6509); mallocAddr = (void*)x1382; int64_t x6512 = (long)gpuMallocAddr; int64_t x6513 = x6512 - x1383; hipMemset((void*)x1383, 0, x6513); gpuMallocAddr = (void*)x1383; } gettimeofday(&end_1, NULL); timeval_subtract(&diff_1, &end_1, &begin_1);; int64_t x6520 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec)); double x6521 = (double)x6520; double x6522 = x6521 / 1000000.0; x1381[x1386] = x6522; int64_t x6524 = x6520 / 1000LL; int64_t x6526 = x6520 / x6525; printf("Training completed in %ldms (%ld us/images)\n",x6524,x6526); float x6528 = x1390; float x6530 = x6528 / x6529; double x6531 = (double)x6530; x1380[x1386] = x6531; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x6537 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); sort(x1381, x1381 + 4); double x6543 = x1381[2]; int64_t x6544 = (long)fopen(x0, "w"); fprintf((FILE *)x6544, "unit: %s\n", "1 epoch"); for(int x6546=0; x6546 < 4; x6546++) { double x6547 = x1380[x6546]; fprintf((FILE *)x6544, "%lf\n", x6547); } fprintf((FILE *)x6544, "run time: %lf %lf\n", x39, x6543); fclose((FILE*)x6544); // Backend cleanup. CUBLAS_CALL(hipblasDestroy(cublasHandle)); CUDA_CALL(hipFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
ef852ce47f5b84175d39cc159bb039dd55b08cbf.cu
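Like its HIP counterpart above, the CUDA file below never frees individual tensors: myMalloc and myGpuMalloc just bump a pointer inside one large pre-allocated arena, and the training loop ends each iteration by memset-ing the used region and rewinding the pointer to a saved watermark (visible near the end of the generated code above). A condensed, self-contained sketch of that scheme follows; the names Arena, arena_alloc and arena_reset are hypothetical simplifications of what the file itself defines.

// Sketch only: the bump-allocator pattern behind myMalloc/myGpuMalloc.
#include <cstdint>
#include <cstring>
#include <cstdio>
#include <cstdlib>

struct Arena {
  char*  base;      // start of the pre-allocated region
  size_t capacity;  // total bytes reserved up front
  size_t offset;    // bytes handed out so far
};

// Hand out bytes, rounded up to a 16-byte boundary, by advancing the offset.
static void* arena_alloc(Arena* a, size_t bytes) {
  bytes = (bytes + 15) & ~size_t(15);
  if (a->offset + bytes > a->capacity) {
    fprintf(stderr, "arena breached its capacity\n");
    exit(1);
  }
  void* p = a->base + a->offset;
  a->offset += bytes;
  return p;
}

// End-of-iteration reset: wipe everything allocated since the watermark and
// reuse the same memory next iteration (the generated code does the analogous
// thing with memset/cudaMemset and the saved mallocAddr/gpuMallocAddr values).
static void arena_reset(Arena* a, size_t watermark) {
  memset(a->base + watermark, 0, a->offset - watermark);
  a->offset = watermark;
}

The generated program keeps two such arenas, one for host memory (mallocAddr) and one for device memory (gpuMallocAddr), and resets both at the end of every training iteration.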
#include <assert.h> #include <err.h> #include <errno.h> #include <fcntl.h> #include <functional> #include <math.h> #include <memory> #include <random> #include <stdint.h> #include <stdio.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include <cblas.h> #include <algorithm> #include <numeric> #include <cuda.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cudnn.h> using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ cublasStatus_t stat = (f); \ if (stat != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val ? max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3, int outputStride0, int outputStride1, int outputStride2, int outputStride3, const int dimSize, const int concatDim, int linearIndex) { int offset = 0; int curDimSize = 3 == concatDim ? dimSize : outputSize3; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStride3; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 2 == concatDim ? dimSize : outputSize2; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride2; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 1 == concatDim ? dimSize : outputSize1; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride1; offset += curDimOffset; linearIndex = nextDimIndex; return offset + linearIndex * outputStride0; // for (int i = 3; i >= 1; i--) { // int curDimSize = i == concatDim ?
dimSize : outputSize[i]; // int nextDimIndex = linearIndex / curDimSize; // int curDimIndex = linearIndex - curDimSize * nextDimIndex; // int curDimOffset = curDimIndex * outputStride[i]; // offset += curDimOffset; // linearIndex = nextDimIndex; // } // return offset + linearIndex * outputStride[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; tid += stride; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; tid += stride; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; 
tid < scalarCount; tid += stride) { int linearIndex = tid; int xindex0 = linearIndex / xstrides0; linearIndex = linearIndex - xstrides0 * xindex0; int xindex1 = linearIndex / xstrides1; linearIndex = linearIndex - xstrides1 * xindex1; int xindex2 = linearIndex / xstrides2; int xindex3 = linearIndex - xstrides2 * xindex2; if (xindex3 >= mask[xindex0]) in[tid] = 0; } } __global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { out[tid] = in1[tid] * in2[tid % in2ScalarCount]; } } __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } } // From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh // Result of div/mod operation stored together. template <typename Value> struct DivMod { Value div, mod; __host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { } }; // Base case: we only have an implementation for uint32_t for now. For // everything else, we use plain division. template <typename Value> struct IntDivider { IntDivider() { } // Dummy constructor for arrays. IntDivider(Value d) : divisor(d) { } __host__ __device__ inline Value div(Value n) const { return n / divisor; } __host__ __device__ inline Value mod(Value n) const { return n % divisor; } __host__ __device__ inline DivMod<Value> divmod(Value n) const { return DivMod<Value>(n / divisor, n % divisor); } Value divisor; }; // Implement fast integer division. template <> struct IntDivider<unsigned int> { static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int."); IntDivider() { } // Dummy constructor for arrays. IntDivider(unsigned int d) : divisor(d) { assert(divisor >= 1 && divisor <= INT32_MAX); // TODO: gcc/clang has __builtin_clz() but it's not portable. for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break; uint64_t one = 1; uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1; m1 = magic; assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits. } __host__ __device__ inline unsigned int div(unsigned int n) const { #ifdef __CUDA_ARCH__ // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and // 'm1'. unsigned int t = __umulhi(n, m1); return (t + n) >> shift; #else // Using uint64_t so that the addition does not overflow. uint64_t t = ((uint64_t) n * m1) >> 32; return (t + n) >> shift; #endif } __host__ __device__ inline unsigned int mod(unsigned int n) const { return n - div(n) * divisor; } __host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const { unsigned int q = div(n); return DivMod<unsigned int>(q, n - q * divisor); } unsigned int divisor; // d above. unsigned int m1; // Magic number: m' above. unsigned int shift; // Shift amounts. }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh /// OffsetCalculator calculates the offset in bytes of a linear index for NARGS /// operands that share the same shape, but may have different strides. 
template <int NARGS> struct OffsetCalculator { static constexpr int MAX_DIMS = 25; // The offset for each argument (in bytes). Wrapper around fixed-size array. struct offsets_t { __host__ __device__ uint32_t& operator[](int idx) { return values[idx]; } uint32_t values[NARGS]; }; // OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) { OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) { for (int i = 0; i < MAX_DIMS; ++i) { if (i < dims) { sizes_[i] = IntDivider<uint32_t>(sizes[i]); } else { sizes_[i] = IntDivider<uint32_t>(1); } for (int arg = 0; arg < NARGS; arg++) { strides_[i][arg] = i < dims ? strides[arg][i] : 0; } } } __host__ __device__ offsets_t get(uint32_t linear_idx) const { offsets_t offsets; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] = 0; } #pragma unroll for (int dim = 0; dim < MAX_DIMS; ++dim) { if (dim == dims) { break; } auto divmod = sizes_[dim].divmod(linear_idx); linear_idx = divmod.div; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] += divmod.mod * strides_[dim][arg]; } } return offsets; } void print() { for (auto i = 1; i < 128; i++) { auto offsets = get(i); printf("offsets[%d]: ", i); for (auto arg = 0; arg < NARGS; arg++) { printf("%d ", offsets[arg]); } printf("\n"); } } int dims; IntDivider<uint32_t> sizes_[MAX_DIMS]; uint32_t strides_[MAX_DIMS][NARGS]; }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh template<int nt, int vt, typename func_t> __launch_bounds__(nt, 4) __global__ void elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); elementwise_kernel<nt, vt, func_t><<<grid, block, 0>>>(N, f); } template<typename func_t> void gpu_unary_kernel(float *res, float *x, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<2> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in = &x[offsets[1]]; *out = f(*in); }); } template<typename func_t> void gpu_binary_kernel(float *res, float *x, float *y, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<3> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in1 = &x[offsets[1]]; float* in2 = &y[offsets[2]]; *out = f(*in1, *in2); }); } #define CUDNN_CALL(f) { \ cudnnStatus_t stat = (f); \ if (stat != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void Snippet(char *); std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0, 0.01}; int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: query <filename>\n"); return 0; } Snippet(argv[1]); return 0; } /***************************************** Emitting C Generated Code *******************************************/ #include <stdio.h> #include <stdlib.h> #include 
<string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. cublasHandle_t cublasHandle; CUBLAS_CALL(cublasCreate(&cublasHandle)); CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); srand(42); struct timeval begin_0, end_0, diff_0; gettimeofday(&begin_0, NULL); int32_t x7 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0); int64_t x8 = fsize(x7); int64_t x10 = x8 / 3073LL; int32_t x11 = (int32_t)x10; int32_t x12 = x11 * 3072; float* x13 = (float*)myMalloc(x12 * sizeof(float));; int* x14 = (int32_t*)myMalloc(x11 * sizeof(int32_t));; char* x9 = (char*)mmap(0, x8, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x7, 0); for(int x16=0; x16 < x11; x16++) { int32_t x17 = x16 * 3073; char x18 = x9[x17]; int32_t x19 = (int32_t)(unsigned char)x18; x14[x16] = x19; int32_t x25 = x17 + 1; int32_t x23 = x16 * 3072; for(int x22=0; x22 < 3072; x22++) { int32_t x26 = x25 + x22; char x27 = x9[x26]; int32_t x24 = x23 + x22; float x28 = (float)(unsigned char)x27; float x29 = x28 / 255.0f; x13[x24] = x29; } } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x37 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); float x38 = (float)x37; float x39 = x38 / 1000000.0f; printf("Data normalized (all prepare time) in %lf sec\n",x39); // Tensor 'toGPU' invocation. float* x313 = (float*)myGpuMalloc(262144 * sizeof(float)); int32_t x42 = open("/home/fei/bitbucket/Lantern/src/out/PLDI19evaluation/resnet50/resnet50.onnx.bin",0); int64_t x43 = fsize(x42); float* x44 = (float*)mmap(0, x43, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x42, 0); float* x45 = x44+5205440; CUDA_CALL(cudaMemcpy(x313, x45, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x316 = (float*)myGpuMalloc(256 * sizeof(float)); float* x46 = x44+148672; CUDA_CALL(cudaMemcpy(x316, x46, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x319 = (float*)myGpuMalloc(128 * sizeof(float)); float* x47 = x44+816064; CUDA_CALL(cudaMemcpy(x319, x47, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x322 = (float*)myGpuMalloc(128 * sizeof(float)); float* x48 = x44+950080; CUDA_CALL(cudaMemcpy(x322, x48, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x325 = (float*)myGpuMalloc(64 * sizeof(float)); float* x49 = x44+94784; CUDA_CALL(cudaMemcpy(x325, x49, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x328 = (float*)myGpuMalloc(32768 * sizeof(float)); float* x50 = x44+220608; CUDA_CALL(cudaMemcpy(x328, x50, 32768 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x331 = (float*)myGpuMalloc(512 * sizeof(float)); float* x51 = x44+22495680; CUDA_CALL(cudaMemcpy(x331, x51, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x334 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x52 = x44+2964928; CUDA_CALL(cudaMemcpy(x334, x52, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x337 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x53 = x44+4348352; CUDA_CALL(cudaMemcpy(x337, x53, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
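// Note on the repeated "Tensor 'toGPU' invocation" blocks that follow: each one
// carves a device buffer out of the GPU heap reserved above (myGpuMalloc,
// defined earlier in this file, hands out space from the gpuMallocBase region),
// takes a host pointer xNN at a fixed element offset into x44 -- the mmap'ed
// resnet50.onnx.bin parameter blob -- and copies that many floats to the device
// with cudaMemcpy.  The element counts (64, 256, 262144, ...) are the flattened
// sizes of the corresponding ResNet-50 parameter tensors.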
float* x340 = (float*)myGpuMalloc(512 * sizeof(float)); float* x54 = x44+20133312; CUDA_CALL(cudaMemcpy(x340, x54, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x343 = (float*)myGpuMalloc(256 * sizeof(float)); float* x55 = x44+2169536; CUDA_CALL(cudaMemcpy(x343, x55, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x346 = (float*)myGpuMalloc(128 * sizeof(float)); float* x56 = x44+668224; CUDA_CALL(cudaMemcpy(x346, x56, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x349 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x57 = x44+2432448; CUDA_CALL(cudaMemcpy(x349, x57, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x352 = (float*)myGpuMalloc(512 * sizeof(float)); float* x58 = x44+1446336; CUDA_CALL(cudaMemcpy(x352, x58, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x355 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x59 = x44+4081088; CUDA_CALL(cudaMemcpy(x355, x59, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x358 = (float*)myGpuMalloc(256 * sizeof(float)); float* x60 = x44+1578688; CUDA_CALL(cudaMemcpy(x358, x60, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x361 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x61 = x44+6325696; CUDA_CALL(cudaMemcpy(x361, x61, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x364 = (float*)myGpuMalloc(512 * sizeof(float)); float* x62 = x44+602048; CUDA_CALL(cudaMemcpy(x364, x62, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x367 = (float*)myGpuMalloc(64 * sizeof(float)); float* x63 = x44+165888; CUDA_CALL(cudaMemcpy(x367, x63, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x370 = (float*)myGpuMalloc(512 * sizeof(float)); float* x64 = x44+1164736; CUDA_CALL(cudaMemcpy(x370, x64, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x373 = (float*)myGpuMalloc(64 * sizeof(float)); float* x65 = x44+6080; CUDA_CALL(cudaMemcpy(x373, x65, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x376 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x66 = x44+253888; CUDA_CALL(cudaMemcpy(x376, x66, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x379 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x67 = x44+20135360; CUDA_CALL(cudaMemcpy(x379, x67, 2359296 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x382 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x68 = x44+2960832; CUDA_CALL(cudaMemcpy(x382, x68, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x385 = (float*)myGpuMalloc(256 * sizeof(float)); float* x69 = x44+3227072; CUDA_CALL(cudaMemcpy(x385, x69, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x388 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x70 = x44+3228096; CUDA_CALL(cudaMemcpy(x388, x70, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x391 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x71 = x44+43456; CUDA_CALL(cudaMemcpy(x391, x71, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x394 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x72 = x44+22496704; CUDA_CALL(cudaMemcpy(x394, x72, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x397 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x73 = x44+9092544; CUDA_CALL(cudaMemcpy(x397, x73, 2359296 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x400 = (float*)myGpuMalloc(128 * sizeof(float)); float* x74 = x44+816320; CUDA_CALL(cudaMemcpy(x400, x74, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x403 = (float*)myGpuMalloc(256 * sizeof(float)); float* x75 = x44+60608; CUDA_CALL(cudaMemcpy(x403, x75, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x406 = (float*)myGpuMalloc(256 * sizeof(float)); float* x76 = x44+219584; CUDA_CALL(cudaMemcpy(x406, x76, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x409 = (float*)myGpuMalloc(128 * sizeof(float)); float* x77 = x44+1379392; CUDA_CALL(cudaMemcpy(x409, x77, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x412 = (float*)myGpuMalloc(128 * sizeof(float)); float* x78 = x44+1231296; CUDA_CALL(cudaMemcpy(x412, x78, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x415 = (float*)myGpuMalloc(64 * sizeof(float)); float* x79 = x44+1856; CUDA_CALL(cudaMemcpy(x415, x79, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x418 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x80 = x44+1098176; CUDA_CALL(cudaMemcpy(x418, x80, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x421 = (float*)myGpuMalloc(512 * sizeof(float)); float* x81 = x44+601536; CUDA_CALL(cudaMemcpy(x421, x81, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x424 = (float*)myGpuMalloc(128 * sizeof(float)); float* x82 = x44+401728; CUDA_CALL(cudaMemcpy(x424, x82, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x427 = (float*)myGpuMalloc(64 * sizeof(float)); float* x83 = x44+131904; CUDA_CALL(cudaMemcpy(x427, x83, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x430 = (float*)myGpuMalloc(128 * sizeof(float)); float* x84 = x44+949696; CUDA_CALL(cudaMemcpy(x430, x84, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x433 = (float*)myGpuMalloc(512 * sizeof(float)); float* x85 = x44+15664576; CUDA_CALL(cudaMemcpy(x433, x85, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x436 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x86 = x44+18027968; CUDA_CALL(cudaMemcpy(x436, x86, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x439 = (float*)myGpuMalloc(10 * sizeof(float)); float* x87 = x44+23573952; CUDA_CALL(cudaMemcpy(x439, x87, 10 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x442 = (float*)myGpuMalloc(64 * sizeof(float)); float* x88 = x44+43264; CUDA_CALL(cudaMemcpy(x442, x88, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x445 = (float*)myGpuMalloc(512 * sizeof(float)); float* x89 = x44+11453376; CUDA_CALL(cudaMemcpy(x445, x89, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x448 = (float*)myGpuMalloc(64 * sizeof(float)); float* x90 = x44+6272; CUDA_CALL(cudaMemcpy(x448, x90, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x451 = (float*)myGpuMalloc(512 * sizeof(float)); float* x91 = x44+882112; CUDA_CALL(cudaMemcpy(x451, x91, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x454 = (float*)myGpuMalloc(64 * sizeof(float)); float* x92 = x44+6144; CUDA_CALL(cudaMemcpy(x454, x92, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x457 = (float*)myGpuMalloc(512 * sizeof(float)); float* x93 = x44+1445824; CUDA_CALL(cudaMemcpy(x457, x93, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x460 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x94 = x44+1379776; CUDA_CALL(cudaMemcpy(x460, x94, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x463 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x95 = x44+3818944; CUDA_CALL(cudaMemcpy(x463, x95, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x466 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x96 = x44+5202368; CUDA_CALL(cudaMemcpy(x466, x96, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x469 = (float*)myGpuMalloc(256 * sizeof(float)); float* x97 = x44+148416; CUDA_CALL(cudaMemcpy(x469, x97, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x472 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x98 = x44+7441856; CUDA_CALL(cudaMemcpy(x472, x98, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x475 = (float*)myGpuMalloc(64 * sizeof(float)); float* x99 = x44+94720; CUDA_CALL(cudaMemcpy(x475, x99, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x478 = (float*)myGpuMalloc(128 * sizeof(float)); float* x100 = x44+1097792; CUDA_CALL(cudaMemcpy(x478, x100, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x481 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x101 = x44+12504512; CUDA_CALL(cudaMemcpy(x481, x101, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x484 = (float*)myGpuMalloc(256 * sizeof(float)); float* x102 = x44+4938944; CUDA_CALL(cudaMemcpy(x484, x102, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x487 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x103 = x44+14611904; CUDA_CALL(cudaMemcpy(x487, x103, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x490 = (float*)myGpuMalloc(512 * sizeof(float)); float* x104 = x44+15666112; CUDA_CALL(cudaMemcpy(x490, x104, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x493 = (float*)myGpuMalloc(512 * sizeof(float)); float* x105 = x44+18026432; CUDA_CALL(cudaMemcpy(x493, x105, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x496 = (float*)myGpuMalloc(512 * sizeof(float)); float* x106 = x44+9091520; CUDA_CALL(cudaMemcpy(x496, x106, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x499 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x107 = x44+19080640; CUDA_CALL(cudaMemcpy(x499, x107, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x502 = (float*)myGpuMalloc(256 * sizeof(float)); float* x108 = x44+6588608; CUDA_CALL(cudaMemcpy(x502, x108, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x505 = (float*)myGpuMalloc(256 * sizeof(float)); float* x109 = x44+8299456; CUDA_CALL(cudaMemcpy(x505, x109, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x508 = (float*)myGpuMalloc(256 * sizeof(float)); float* x110 = x44+60352; CUDA_CALL(cudaMemcpy(x508, x110, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x511 = (float*)myGpuMalloc(64 * sizeof(float)); float* x111 = x44+202944; CUDA_CALL(cudaMemcpy(x511, x111, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x514 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x112 = x44+166080; CUDA_CALL(cudaMemcpy(x514, x112, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x517 = (float*)myGpuMalloc(256 * sizeof(float)); float* x113 = x44+6058432; CUDA_CALL(cudaMemcpy(x517, x113, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x520 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x114 = x44+2436544; CUDA_CALL(cudaMemcpy(x520, x114, 524288 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x523 = (float*)myGpuMalloc(256 * sizeof(float)); float* x115 = x44+77248; CUDA_CALL(cudaMemcpy(x523, x115, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x526 = (float*)myGpuMalloc(256 * sizeof(float)); float* x116 = x44+6587840; CUDA_CALL(cudaMemcpy(x526, x116, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x529 = (float*)myGpuMalloc(512 * sizeof(float)); float* x117 = x44+20133824; CUDA_CALL(cudaMemcpy(x529, x117, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x532 = (float*)myGpuMalloc(128 * sizeof(float)); float* x118 = x44+1379264; CUDA_CALL(cudaMemcpy(x532, x118, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x535 = (float*)myGpuMalloc(256 * sizeof(float)); float* x119 = x44+7708608; CUDA_CALL(cudaMemcpy(x535, x119, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x538 = (float*)myGpuMalloc(64 * sizeof(float)); float* x120 = x44+165824; CUDA_CALL(cudaMemcpy(x538, x120, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x541 = (float*)myGpuMalloc(512 * sizeof(float)); float* x121 = x44+1164224; CUDA_CALL(cudaMemcpy(x541, x121, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x544 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x122 = x44+94912; CUDA_CALL(cudaMemcpy(x544, x122, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x547 = (float*)myGpuMalloc(128 * sizeof(float)); float* x123 = x44+253376; CUDA_CALL(cudaMemcpy(x547, x123, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x550 = (float*)myGpuMalloc(256 * sizeof(float)); float* x124 = x44+7708096; CUDA_CALL(cudaMemcpy(x550, x124, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x553 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x125 = x44+2962880; CUDA_CALL(cudaMemcpy(x553, x125, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x556 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x126 = x44+203200; CUDA_CALL(cudaMemcpy(x556, x126, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x559 = (float*)myGpuMalloc(512 * sizeof(float)); float* x127 = x44+883648; CUDA_CALL(cudaMemcpy(x559, x127, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x562 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x128 = x44+6059456; CUDA_CALL(cudaMemcpy(x562, x128, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x565 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x129 = x44+6336; CUDA_CALL(cudaMemcpy(x565, x129, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x568 = (float*)myGpuMalloc(256 * sizeof(float)); float* x130 = x44+148928; CUDA_CALL(cudaMemcpy(x568, x130, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x571 = (float*)myGpuMalloc(256 * sizeof(float)); float* x131 = x44+5467584; CUDA_CALL(cudaMemcpy(x571, x131, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x574 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x132 = x44+8563136; CUDA_CALL(cudaMemcpy(x574, x132, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x577 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x133 = x44+19076544; CUDA_CALL(cudaMemcpy(x577, x133, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x580 = (float*)myGpuMalloc(128 * sizeof(float)); float* x134 = x44+816192; CUDA_CALL(cudaMemcpy(x580, x134, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x583 = (float*)myGpuMalloc(256 * sizeof(float)); float* x135 = x44+3818176; CUDA_CALL(cudaMemcpy(x583, x135, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x586 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x136 = x44+8299968; CUDA_CALL(cudaMemcpy(x586, x136, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x589 = (float*)myGpuMalloc(256 * sizeof(float)); float* x137 = x44+5468352; CUDA_CALL(cudaMemcpy(x589, x137, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x592 = (float*)myGpuMalloc(256 * sizeof(float)); float* x138 = x44+2170048; CUDA_CALL(cudaMemcpy(x592, x138, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x595 = (float*)myGpuMalloc(128 * sizeof(float)); float* x139 = x44+668352; CUDA_CALL(cudaMemcpy(x595, x139, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x598 = (float*)myGpuMalloc(512 * sizeof(float)); float* x140 = x44+468928; CUDA_CALL(cudaMemcpy(x598, x140, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x601 = (float*)myGpuMalloc(64 * sizeof(float)); float* x141 = x44+94848; CUDA_CALL(cudaMemcpy(x601, x141, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x604 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x142 = x44+23545280; CUDA_CALL(cudaMemcpy(x604, x142, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x607 = (float*)myGpuMalloc(256 * sizeof(float)); float* x143 = x44+7179456; CUDA_CALL(cudaMemcpy(x607, x143, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x610 = (float*)myGpuMalloc(64 * sizeof(float)); float* x144 = x44+43328; CUDA_CALL(cudaMemcpy(x610, x144, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x613 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x145 = x44+401856; CUDA_CALL(cudaMemcpy(x613, x145, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x616 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x146 = x44+14609856; CUDA_CALL(cudaMemcpy(x616, x146, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x619 = (float*)myGpuMalloc(256 * sizeof(float)); float* x147 = x44+2169280; CUDA_CALL(cudaMemcpy(x619, x147, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x622 = (float*)myGpuMalloc(256 * sizeof(float)); float* x148 = x44+7178944; CUDA_CALL(cudaMemcpy(x622, x148, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x625 = (float*)myGpuMalloc(64 * sizeof(float)); float* x149 = x44+1920; CUDA_CALL(cudaMemcpy(x625, x149, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x628 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x150 = x44+816576; CUDA_CALL(cudaMemcpy(x628, x150, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x631 = (float*)myGpuMalloc(128 * sizeof(float)); float* x151 = x44+949952; CUDA_CALL(cudaMemcpy(x631, x151, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x634 = (float*)myGpuMalloc(512 * sizeof(float)); float* x152 = x44+11452864; CUDA_CALL(cudaMemcpy(x634, x152, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x637 = (float*)myGpuMalloc(64 * sizeof(float)); float* x153 = x44+6208; CUDA_CALL(cudaMemcpy(x637, x153, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x640 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x154 = x44+12506560; CUDA_CALL(cudaMemcpy(x640, x154, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x643 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x155 = x44+4939200; CUDA_CALL(cudaMemcpy(x643, x155, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x646 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x156 = x44+2433472; CUDA_CALL(cudaMemcpy(x646, x156, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x649 = (float*)myGpuMalloc(64 * sizeof(float)); float* x157 = x44+203136; CUDA_CALL(cudaMemcpy(x649, x157, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x652 = (float*)myGpuMalloc(512 * sizeof(float)); float* x158 = x44+601024; CUDA_CALL(cudaMemcpy(x652, x158, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x655 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x159 = x44+7442880; CUDA_CALL(cudaMemcpy(x655, x159, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x658 = (float*)myGpuMalloc(512 * sizeof(float)); float* x160 = x44+9092032; CUDA_CALL(cudaMemcpy(x658, x160, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x661 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x161 = x44+8564160; CUDA_CALL(cudaMemcpy(x661, x161, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x664 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x162 = x44+23551424; CUDA_CALL(cudaMemcpy(x664, x162, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x667 = (float*)myGpuMalloc(256 * sizeof(float)); float* x163 = x44+4938688; CUDA_CALL(cudaMemcpy(x667, x163, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x670 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x164 = x44+14613952; CUDA_CALL(cudaMemcpy(x670, x164, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x673 = (float*)myGpuMalloc(256 * sizeof(float)); float* x165 = x44+60096; CUDA_CALL(cudaMemcpy(x673, x165, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x676 = (float*)myGpuMalloc(128 * sizeof(float)); float* x166 = x44+1097664; CUDA_CALL(cudaMemcpy(x676, x166, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x679 = (float*)myGpuMalloc(128 * sizeof(float)); float* x167 = x44+401600; CUDA_CALL(cudaMemcpy(x679, x167, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x682 = (float*)myGpuMalloc(256 * sizeof(float)); float* x168 = x44+4347328; CUDA_CALL(cudaMemcpy(x682, x168, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x685 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x169 = x44+132032; CUDA_CALL(cudaMemcpy(x685, x169, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x688 = (float*)myGpuMalloc(256 * sizeof(float)); float* x170 = x44+1578944; CUDA_CALL(cudaMemcpy(x688, x170, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x691 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x171 = x44+1165760; CUDA_CALL(cudaMemcpy(x691, x171, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x694 = (float*)myGpuMalloc(256 * sizeof(float)); float* x172 = x44+220352; CUDA_CALL(cudaMemcpy(x694, x172, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x697 = (float*)myGpuMalloc(128 * sizeof(float)); float* x173 = x44+253760; CUDA_CALL(cudaMemcpy(x697, x173, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x700 = (float*)myGpuMalloc(64 * sizeof(float)); float* x174 = x44+203008; CUDA_CALL(cudaMemcpy(x700, x174, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x703 = (float*)myGpuMalloc(256 * sizeof(float)); float* x175 = x44+6058688; CUDA_CALL(cudaMemcpy(x703, x175, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x706 = (float*)myGpuMalloc(512 * sizeof(float)); float* x176 = x44+15665088; CUDA_CALL(cudaMemcpy(x706, x176, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x709 = (float*)myGpuMalloc(512 * sizeof(float)); float* x177 = x44+18026944; CUDA_CALL(cudaMemcpy(x709, x177, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x712 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x178 = x44+8566208; CUDA_CALL(cudaMemcpy(x712, x178, 524288 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x715 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x179 = x44+5203392; CUDA_CALL(cudaMemcpy(x715, x179, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x718 = (float*)myGpuMalloc(256 * sizeof(float)); float* x180 = x44+8298944; CUDA_CALL(cudaMemcpy(x718, x180, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x721 = (float*)myGpuMalloc(64 * sizeof(float)); float* x181 = x44+94656; CUDA_CALL(cudaMemcpy(x721, x181, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x724 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x182 = x44+4084160; CUDA_CALL(cudaMemcpy(x724, x182, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x727 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x183 = x44+19078592; CUDA_CALL(cudaMemcpy(x727, x183, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x730 = (float*)myGpuMalloc(512 * sizeof(float)); float* x184 = x44+467392; CUDA_CALL(cudaMemcpy(x730, x184, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x733 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x185 = x44+6322624; CUDA_CALL(cudaMemcpy(x733, x185, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x736 = (float*)myGpuMalloc(512 * sizeof(float)); float* x186 = x44+883136; CUDA_CALL(cudaMemcpy(x736, x186, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x739 = (float*)myGpuMalloc(128 * sizeof(float)); float* x187 = x44+1379648; CUDA_CALL(cudaMemcpy(x739, x187, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x742 = (float*)myGpuMalloc(512 * sizeof(float)); float* x188 = x44+468416; CUDA_CALL(cudaMemcpy(x742, x188, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x745 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x189 = x44+149440; CUDA_CALL(cudaMemcpy(x745, x189, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x748 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x190 = x44+7445952; CUDA_CALL(cudaMemcpy(x748, x190, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x751 = (float*)myGpuMalloc(1728 * sizeof(float)); float* x191 = x44+0; CUDA_CALL(cudaMemcpy(x751, x191, 1728 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x754 = (float*)myGpuMalloc(64 * sizeof(float)); float* x192 = x44+131840; CUDA_CALL(cudaMemcpy(x754, x192, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x757 = (float*)myGpuMalloc(512 * sizeof(float)); float* x193 = x44+15665600; CUDA_CALL(cudaMemcpy(x757, x193, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x760 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x194 = x44+15666624; CUDA_CALL(cudaMemcpy(x760, x194, 2359296 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x763 = (float*)myGpuMalloc(512 * sizeof(float)); float* x195 = x44+1445312; CUDA_CALL(cudaMemcpy(x763, x195, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x766 = (float*)myGpuMalloc(256 * sizeof(float)); float* x196 = x44+3227840; CUDA_CALL(cudaMemcpy(x766, x196, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x769 = (float*)myGpuMalloc(64 * sizeof(float)); float* x197 = x44+43392; CUDA_CALL(cudaMemcpy(x769, x197, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x772 = (float*)myGpuMalloc(512 * sizeof(float)); float* x198 = x44+11452352; CUDA_CALL(cudaMemcpy(x772, x198, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x775 = (float*)myGpuMalloc(512 * sizeof(float)); float* x199 = x44+18025920; CUDA_CALL(cudaMemcpy(x775, x199, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x778 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x200 = x44+6324672; CUDA_CALL(cudaMemcpy(x778, x200, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x781 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x201 = x44+60864; CUDA_CALL(cudaMemcpy(x781, x201, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x784 = (float*)myGpuMalloc(256 * sizeof(float)); float* x202 = x44+5468096; CUDA_CALL(cudaMemcpy(x784, x202, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x787 = (float*)myGpuMalloc(64 * sizeof(float)); float* x203 = x44+43200; CUDA_CALL(cudaMemcpy(x787, x203, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x790 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x204 = x44+1231808; CUDA_CALL(cudaMemcpy(x790, x204, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x793 = (float*)myGpuMalloc(256 * sizeof(float)); float* x205 = x44+149184; CUDA_CALL(cudaMemcpy(x793, x205, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x796 = (float*)myGpuMalloc(512 * sizeof(float)); float* x206 = x44+1163712; CUDA_CALL(cudaMemcpy(x796, x206, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x799 = (float*)myGpuMalloc(256 * sizeof(float)); float* x207 = x44+7178688; CUDA_CALL(cudaMemcpy(x799, x207, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x802 = (float*)myGpuMalloc(512 * sizeof(float)); float* x208 = x44+22495168; CUDA_CALL(cudaMemcpy(x802, x208, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x805 = (float*)myGpuMalloc(128 * sizeof(float)); float* x209 = x44+949824; CUDA_CALL(cudaMemcpy(x805, x209, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x808 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x210 = x44+78272; CUDA_CALL(cudaMemcpy(x808, x210, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x811 = (float*)myGpuMalloc(128 * sizeof(float)); float* x211 = x44+253504; CUDA_CALL(cudaMemcpy(x811, x211, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x814 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x212 = x44+14607808; CUDA_CALL(cudaMemcpy(x814, x212, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x817 = (float*)myGpuMalloc(256 * sizeof(float)); float* x213 = x44+4348096; CUDA_CALL(cudaMemcpy(x817, x213, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x820 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x214 = x44+1579456; CUDA_CALL(cudaMemcpy(x820, x214, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x823 = (float*)myGpuMalloc(256 * sizeof(float)); float* x215 = x44+7708864; CUDA_CALL(cudaMemcpy(x823, x215, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x826 = (float*)myGpuMalloc(128 * sizeof(float)); float* x216 = x44+668480; CUDA_CALL(cudaMemcpy(x826, x216, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x829 = (float*)myGpuMalloc(256 * sizeof(float)); float* x217 = x44+4347840; CUDA_CALL(cudaMemcpy(x829, x217, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x832 = (float*)myGpuMalloc(64 * sizeof(float)); float* x218 = x44+203072; CUDA_CALL(cudaMemcpy(x832, x218, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x835 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x219 = x44+1447360; CUDA_CALL(cudaMemcpy(x835, x219, 131072 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x838 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x220 = x44+23547328; CUDA_CALL(cudaMemcpy(x838, x220, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x841 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x221 = x44+4083136; CUDA_CALL(cudaMemcpy(x841, x221, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x844 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x222 = x44+8565184; CUDA_CALL(cudaMemcpy(x844, x222, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x847 = (float*)myGpuMalloc(256 * sizeof(float)); float* x223 = x44+220096; CUDA_CALL(cudaMemcpy(x847, x223, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x850 = (float*)myGpuMalloc(256 * sizeof(float)); float* x224 = x44+6588096; CUDA_CALL(cudaMemcpy(x850, x224, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x853 = (float*)myGpuMalloc(256 * sizeof(float)); float* x225 = x44+6058944; CUDA_CALL(cudaMemcpy(x853, x225, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x856 = (float*)myGpuMalloc(64 * sizeof(float)); float* x226 = x44+166016; CUDA_CALL(cudaMemcpy(x856, x226, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x859 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x227 = x44+5204416; CUDA_CALL(cudaMemcpy(x859, x227, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x862 = (float*)myGpuMalloc(256 * sizeof(float)); float* x228 = x44+8299200; CUDA_CALL(cudaMemcpy(x862, x228, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x865 = (float*)myGpuMalloc(128 * sizeof(float)); float* x229 = x44+401472; CUDA_CALL(cudaMemcpy(x865, x229, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x868 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x230 = x44+950208; CUDA_CALL(cudaMemcpy(x868, x230, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x871 = (float*)myGpuMalloc(256 * sizeof(float)); float* x231 = x44+4938432; CUDA_CALL(cudaMemcpy(x871, x231, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x874 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x232 = x44+12508608; CUDA_CALL(cudaMemcpy(x874, x232, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x877 = (float*)myGpuMalloc(512 * sizeof(float)); float* x233 = x44+22494656; CUDA_CALL(cudaMemcpy(x877, x233, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x880 = (float*)myGpuMalloc(512 * sizeof(float)); float* x234 = x44+18027456; CUDA_CALL(cudaMemcpy(x880, x234, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x883 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x235 = x44+884160; CUDA_CALL(cudaMemcpy(x883, x235, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x886 = (float*)myGpuMalloc(256 * sizeof(float)); float* x236 = x44+4347584; CUDA_CALL(cudaMemcpy(x886, x236, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x889 = (float*)myGpuMalloc(256 * sizeof(float)); float* x237 = x44+1579200; CUDA_CALL(cudaMemcpy(x889, x237, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x892 = (float*)myGpuMalloc(256 * sizeof(float)); float* x238 = x44+59840; CUDA_CALL(cudaMemcpy(x892, x238, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x895 = (float*)myGpuMalloc(256 * sizeof(float)); float* x239 = x44+3818432; CUDA_CALL(cudaMemcpy(x895, x239, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x898 = (float*)myGpuMalloc(512 * sizeof(float)); float* x240 = x44+9090496; CUDA_CALL(cudaMemcpy(x898, x240, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x901 = (float*)myGpuMalloc(512 * sizeof(float)); float* x241 = x44+22496192; CUDA_CALL(cudaMemcpy(x901, x241, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x904 = (float*)myGpuMalloc(256 * sizeof(float)); float* x242 = x44+77504; CUDA_CALL(cudaMemcpy(x904, x242, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x907 = (float*)myGpuMalloc(128 * sizeof(float)); float* x243 = x44+253632; CUDA_CALL(cudaMemcpy(x907, x243, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x910 = (float*)myGpuMalloc(512 * sizeof(float)); float* x244 = x44+11451840; CUDA_CALL(cudaMemcpy(x910, x244, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x913 = (float*)myGpuMalloc(64 * sizeof(float)); float* x245 = x44+1728; CUDA_CALL(cudaMemcpy(x913, x245, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x916 = (float*)myGpuMalloc(512 * sizeof(float)); float* x246 = x44+600512; CUDA_CALL(cudaMemcpy(x916, x246, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x919 = (float*)myGpuMalloc(64 * sizeof(float)); float* x247 = x44+131776; CUDA_CALL(cudaMemcpy(x919, x247, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x922 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x248 = x44+7443904; CUDA_CALL(cudaMemcpy(x922, x248, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x925 = (float*)myGpuMalloc(512 * sizeof(float)); float* x249 = x44+467904; CUDA_CALL(cudaMemcpy(x925, x249, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x928 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x250 = x44+2963904; CUDA_CALL(cudaMemcpy(x928, x250, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x931 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x251 = x44+11453888; CUDA_CALL(cudaMemcpy(x931, x251, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x934 = (float*)myGpuMalloc(512 * sizeof(float)); float* x252 = x44+20134336; CUDA_CALL(cudaMemcpy(x934, x252, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x937 = (float*)myGpuMalloc(2097152 * sizeof(float)); float* x253 = x44+12510656; CUDA_CALL(cudaMemcpy(x937, x253, 2097152 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x940 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x254 = x44+14616000; CUDA_CALL(cudaMemcpy(x940, x254, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x943 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x255 = x44+2434496; CUDA_CALL(cudaMemcpy(x943, x255, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x946 = (float*)myGpuMalloc(128 * sizeof(float)); float* x256 = x44+1097920; CUDA_CALL(cudaMemcpy(x946, x256, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x949 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x257 = x44+4085184; CUDA_CALL(cudaMemcpy(x949, x257, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x952 = (float*)myGpuMalloc(256 * sizeof(float)); float* x258 = x44+3227328; CUDA_CALL(cudaMemcpy(x952, x258, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x955 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x259 = x44+2961856; CUDA_CALL(cudaMemcpy(x955, x259, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x958 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x260 = x44+7179712; CUDA_CALL(cudaMemcpy(x958, x260, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x961 = (float*)myGpuMalloc(128 * sizeof(float)); float* x261 = x44+668096; CUDA_CALL(cudaMemcpy(x961, x261, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x964 = (float*)myGpuMalloc(512 * sizeof(float)); float* x262 = x44+1165248; CUDA_CALL(cudaMemcpy(x964, x262, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x967 = (float*)myGpuMalloc(512 * sizeof(float)); float* x263 = x44+9091008; CUDA_CALL(cudaMemcpy(x967, x263, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x970 = (float*)myGpuMalloc(128 * sizeof(float)); float* x264 = x44+816448; CUDA_CALL(cudaMemcpy(x970, x264, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x973 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x265 = x44+7709120; CUDA_CALL(cudaMemcpy(x973, x265, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x976 = (float*)myGpuMalloc(20480 * sizeof(float)); float* x266 = x44+23553472; CUDA_CALL(cudaMemcpy(x976, x266, 20480 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x979 = (float*)myGpuMalloc(256 * sizeof(float)); float* x267 = x44+4938176; CUDA_CALL(cudaMemcpy(x979, x267, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x982 = (float*)myGpuMalloc(256 * sizeof(float)); float* x268 = x44+2169792; CUDA_CALL(cudaMemcpy(x982, x268, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x985 = (float*)myGpuMalloc(256 * sizeof(float)); float* x269 = x44+6059200; CUDA_CALL(cudaMemcpy(x985, x269, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x988 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x270 = x44+6323648; CUDA_CALL(cudaMemcpy(x988, x270, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x991 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x271 = x44+4082112; CUDA_CALL(cudaMemcpy(x991, x271, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x994 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x272 = x44+1984; CUDA_CALL(cudaMemcpy(x994, x272, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x997 = (float*)myGpuMalloc(512 * sizeof(float)); float* x273 = x44+1446848; CUDA_CALL(cudaMemcpy(x997, x273, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1000 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x274 = x44+668608; CUDA_CALL(cudaMemcpy(x1000, x274, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1003 = (float*)myGpuMalloc(128 * sizeof(float)); float* x275 = x44+1231552; CUDA_CALL(cudaMemcpy(x1003, x275, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1006 = (float*)myGpuMalloc(256 * sizeof(float)); float* x276 = x44+3818688; CUDA_CALL(cudaMemcpy(x1006, x276, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1009 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x277 = x44+6321600; CUDA_CALL(cudaMemcpy(x1009, x277, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1012 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x278 = x44+12502464; CUDA_CALL(cudaMemcpy(x1012, x278, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1015 = (float*)myGpuMalloc(256 * sizeof(float)); float* x279 = x44+8299712; CUDA_CALL(cudaMemcpy(x1015, x279, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1018 = (float*)myGpuMalloc(256 * sizeof(float)); float* x280 = x44+5467840; CUDA_CALL(cudaMemcpy(x1018, x280, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1021 = (float*)myGpuMalloc(128 * sizeof(float)); float* x281 = x44+1231424; CUDA_CALL(cudaMemcpy(x1021, x281, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1024 = (float*)myGpuMalloc(256 * sizeof(float)); float* x282 = x44+78016; CUDA_CALL(cudaMemcpy(x1024, x282, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1027 = (float*)myGpuMalloc(64 * sizeof(float)); float* x283 = x44+131968; CUDA_CALL(cudaMemcpy(x1027, x283, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1030 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x284 = x44+19082688; CUDA_CALL(cudaMemcpy(x1030, x284, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1033 = (float*)myGpuMalloc(512 * sizeof(float)); float* x285 = x44+882624; CUDA_CALL(cudaMemcpy(x1033, x285, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1036 = (float*)myGpuMalloc(256 * sizeof(float)); float* x286 = x44+219840; CUDA_CALL(cudaMemcpy(x1036, x286, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1039 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x287 = x44+8562112; CUDA_CALL(cudaMemcpy(x1039, x287, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x1042 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x288 = x44+5468608; CUDA_CALL(cudaMemcpy(x1042, x288, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1045 = (float*)myGpuMalloc(256 * sizeof(float)); float* x289 = x44+7179200; CUDA_CALL(cudaMemcpy(x1045, x289, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1048 = (float*)myGpuMalloc(64 * sizeof(float)); float* x290 = x44+1792; CUDA_CALL(cudaMemcpy(x1048, x290, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1051 = (float*)myGpuMalloc(128 * sizeof(float)); float* x291 = x44+401344; CUDA_CALL(cudaMemcpy(x1051, x291, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1054 = (float*)myGpuMalloc(256 * sizeof(float)); float* x292 = x44+7708352; CUDA_CALL(cudaMemcpy(x1054, x292, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1057 = (float*)myGpuMalloc(256 * sizeof(float)); float* x293 = x44+6588352; CUDA_CALL(cudaMemcpy(x1057, x293, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1060 = (float*)myGpuMalloc(512 * sizeof(float)); float* x294 = x44+20134848; CUDA_CALL(cudaMemcpy(x1060, x294, 512 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1063 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x295 = x44+602560; CUDA_CALL(cudaMemcpy(x1063, x295, 65536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1066 = (float*)myGpuMalloc(64 * sizeof(float)); float* x296 = x44+165952; CUDA_CALL(cudaMemcpy(x1066, x296, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1069 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x297 = x44+469440; CUDA_CALL(cudaMemcpy(x1069, x297, 131072 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1072 = (float*)myGpuMalloc(256 * sizeof(float)); float* x298 = x44+3227584; CUDA_CALL(cudaMemcpy(x1072, x298, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1075 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x299 = x44+23549376; CUDA_CALL(cudaMemcpy(x1075, x299, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1078 = (float*)myGpuMalloc(128 * sizeof(float)); float* x300 = x44+1231680; CUDA_CALL(cudaMemcpy(x1078, x300, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1081 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x301 = x44+6588864; CUDA_CALL(cudaMemcpy(x1081, x301, 589824 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1084 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x302 = x44+5201344; CUDA_CALL(cudaMemcpy(x1084, x302, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1087 = (float*)myGpuMalloc(256 * sizeof(float)); float* x303 = x44+77760; CUDA_CALL(cudaMemcpy(x1087, x303, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1090 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x304 = x44+19084736; CUDA_CALL(cudaMemcpy(x1090, x304, 1048576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1093 = (float*)myGpuMalloc(128 * sizeof(float)); float* x305 = x44+1098048; CUDA_CALL(cudaMemcpy(x1093, x305, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x1096 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x306 = x44+2435520; CUDA_CALL(cudaMemcpy(x1096, x306, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1099 = (float*)myGpuMalloc(128 * sizeof(float)); float* x307 = x44+1379520; CUDA_CALL(cudaMemcpy(x1099, x307, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1102 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x308 = x44+2170304; CUDA_CALL(cudaMemcpy(x1102, x308, 262144 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1105 = (float*)myGpuMalloc(256 * sizeof(float)); float* x309 = x44+1578432; CUDA_CALL(cudaMemcpy(x1105, x309, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1108 = (float*)myGpuMalloc(256 * sizeof(float)); float* x310 = x44+3817920; CUDA_CALL(cudaMemcpy(x1108, x310, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x1111 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x311 = x44+7444928; CUDA_CALL(cudaMemcpy(x1111, x311, 1024 * sizeof(float), cudaMemcpyHostToDevice)); float* x1113 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1114 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1115 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1116 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1117 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1118 = (float*)myGpuMalloc(32768 * sizeof(float)); float* x1119 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1120 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1121 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1122 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1123 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1124 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1125 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1126 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1127 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1128 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1129 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1130 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1131 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1132 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1133 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1134 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1135 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x1136 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1137 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1138 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1139 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1140 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1141 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x1142 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1143 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1144 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1145 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1146 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1147 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1148 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1149 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1150 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1151 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1152 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1153 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1154 = 
(float*)myGpuMalloc(1048576 * sizeof(float)); float* x1155 = (float*)myGpuMalloc(10 * sizeof(float)); float* x1156 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1157 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1158 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1159 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1160 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1161 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1162 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1163 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1164 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1165 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1166 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1167 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1168 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1169 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1170 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1171 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1172 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1173 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1174 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1175 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1176 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1177 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1178 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1179 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1180 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x1181 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1182 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x1183 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1184 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1185 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1186 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1187 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1188 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1189 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1190 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x1191 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1192 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1193 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1194 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1195 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1196 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1197 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x1198 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1199 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1200 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1201 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1202 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1203 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1204 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1205 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1206 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1207 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1208 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1209 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1210 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1211 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1212 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1213 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1214 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1215 = (float*)myGpuMalloc(256 * sizeof(float)); float* 
x1216 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1217 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1218 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1219 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1220 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1221 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1222 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1223 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1224 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1225 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1226 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1227 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1228 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1229 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1230 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1231 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1232 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1233 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1234 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1235 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1236 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1237 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1238 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1239 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1240 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1241 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1242 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1243 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1244 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1245 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1246 = (float*)myGpuMalloc(524288 * sizeof(float)); float* x1247 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1248 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1249 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1250 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1251 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1252 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1253 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1254 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1255 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1256 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1257 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1258 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1259 = (float*)myGpuMalloc(1728 * sizeof(float)); float* x1260 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1261 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1262 = (float*)myGpuMalloc(2359296 * sizeof(float)); float* x1263 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1264 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1265 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1266 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1267 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1268 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1269 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1270 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1271 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1272 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1273 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1274 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1275 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1276 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1277 = (float*)myGpuMalloc(128 * 
sizeof(float)); float* x1278 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x1279 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1280 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1281 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1282 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1283 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1284 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1285 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1286 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1287 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x1288 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1289 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1290 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1291 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1292 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1293 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1294 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1295 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1296 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1297 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1298 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1299 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1300 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1301 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1302 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1303 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1304 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1305 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1306 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1307 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1308 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1309 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1310 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1311 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1312 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1313 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1314 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1315 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1316 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1317 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1318 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1319 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1320 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1321 = (float*)myGpuMalloc(2097152 * sizeof(float)); float* x1322 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1323 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1324 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1325 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1326 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1327 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1328 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1329 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1330 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1331 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1332 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1333 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1334 = (float*)myGpuMalloc(20480 * sizeof(float)); float* x1335 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1336 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1337 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1338 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1339 = 
(float*)myGpuMalloc(1024 * sizeof(float)); float* x1340 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x1341 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1342 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x1343 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1344 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1345 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1346 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1347 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1348 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1349 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1350 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1351 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1352 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1353 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1354 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1355 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1356 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1357 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1358 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1359 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1360 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1361 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1362 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1363 = (float*)myGpuMalloc(65536 * sizeof(float)); float* x1364 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1365 = (float*)myGpuMalloc(131072 * sizeof(float)); float* x1366 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1367 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x1368 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1369 = (float*)myGpuMalloc(589824 * sizeof(float)); float* x1370 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1371 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1372 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x1373 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1374 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x1375 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1376 = (float*)myGpuMalloc(262144 * sizeof(float)); float* x1377 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1378 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1379 = (float*)myGpuMalloc(1024 * sizeof(float)); double* x1380 = (double*)myMalloc(4 * sizeof(double));; double* x1381 = (double*)myMalloc(4 * sizeof(double));; int64_t x1382 = (long)mallocAddr; int64_t x1383 = (long)gpuMallocAddr; // training loop starts here int32_t x1394 = x11 / 64; int32_t x1411 = 31 / 1; int32_t x1412 = x1411 + 1; int32_t x1416 = 4096 * x1412; int32_t x1417 = x1416 * x1412; int32_t x1413 = x1412 * x1412; int32_t x1414 = 64 * x1413; int32_t x1415 = 64 * x1414; int32_t x1443 = x1412 - 2; int32_t x1444 = x1443 / 2; int32_t x1445 = x1444 + 1; int32_t x1449 = 4096 * x1445; int32_t x1450 = x1449 * x1445; bool x1454 = x1445 >= 1; bool x1455; if (x1454) { x1455 = x1454; } else { x1455 = false; } int32_t x1460 = x1444 / 1; int32_t x1461 = x1460 + 1; int32_t x1465 = 4096 * x1461; int32_t x1466 = x1465 * x1461; int32_t x1462 = x1461 * x1461; int32_t x1463 = 64 * x1462; int32_t x1464 = 64 * x1463; int32_t x1488 = x1461 + 2; bool x1489 = x1488 >= 3; bool x1490; if (x1489) { x1490 = x1489; } else { x1490 = false; } int32_t x1495 = x1488 - 3; int32_t x1496 = x1495 / 1; int32_t x1497 = x1496 + 1; int32_t x1501 = 4096 * x1497; int32_t x1502 = x1501 * x1497; int32_t x1498 = x1497 * x1497; int32_t x1499 = 64 * x1498; int32_t x1500 
= 64 * x1499; bool x1524 = x1497 >= 1; bool x1525; if (x1524) { x1525 = x1524; } else { x1525 = false; } int32_t x1530 = x1496 / 1; int32_t x1531 = x1530 + 1; int32_t x1535 = 16384 * x1531; int32_t x1536 = x1535 * x1531; int32_t x1532 = x1531 * x1531; int32_t x1533 = 256 * x1532; int32_t x1534 = 64 * x1533; int32_t x1558 = 16384 * x1461; int32_t x1559 = x1558 * x1461; int32_t x1556 = 256 * x1462; int32_t x1557 = 64 * x1556; bool x1576 = x1461 == 1; bool x1577 = x1461 == x1531; bool x1578 = x1576 || x1577; bool x1579; if (x1578) { x1579 = x1578; } else { x1579 = false; } bool x1594 = x1531 >= 1; bool x1595; if (x1594) { x1595 = x1594; } else { x1595 = false; } int32_t x1600 = x1530 / 1; int32_t x1601 = x1600 + 1; int32_t x1605 = 4096 * x1601; int32_t x1606 = x1605 * x1601; int32_t x1602 = x1601 * x1601; int32_t x1603 = 64 * x1602; int32_t x1604 = 64 * x1603; int32_t x1628 = x1601 + 2; bool x1629 = x1628 >= 3; bool x1630; if (x1629) { x1630 = x1629; } else { x1630 = false; } int32_t x1635 = x1628 - 3; int32_t x1636 = x1635 / 1; int32_t x1637 = x1636 + 1; int32_t x1641 = 4096 * x1637; int32_t x1642 = x1641 * x1637; int32_t x1638 = x1637 * x1637; int32_t x1639 = 64 * x1638; int32_t x1640 = 64 * x1639; bool x1664 = x1637 >= 1; bool x1665; if (x1664) { x1665 = x1664; } else { x1665 = false; } int32_t x1670 = x1636 / 1; int32_t x1671 = x1670 + 1; int32_t x1675 = 16384 * x1671; int32_t x1676 = x1675 * x1671; int32_t x1672 = x1671 * x1671; int32_t x1673 = 256 * x1672; int32_t x1674 = 64 * x1673; bool x1693 = x1531 == 1; bool x1694 = x1531 == x1671; bool x1695 = x1693 || x1694; bool x1696; if (x1695) { x1696 = x1695; } else { x1696 = false; } bool x1711 = x1671 >= 1; bool x1712; if (x1711) { x1712 = x1711; } else { x1712 = false; } int32_t x1717 = x1670 / 1; int32_t x1718 = x1717 + 1; int32_t x1722 = 4096 * x1718; int32_t x1723 = x1722 * x1718; int32_t x1719 = x1718 * x1718; int32_t x1720 = 64 * x1719; int32_t x1721 = 64 * x1720; int32_t x1745 = x1718 + 2; bool x1746 = x1745 >= 3; bool x1747; if (x1746) { x1747 = x1746; } else { x1747 = false; } int32_t x1752 = x1745 - 3; int32_t x1753 = x1752 / 1; int32_t x1754 = x1753 + 1; int32_t x1758 = 4096 * x1754; int32_t x1759 = x1758 * x1754; int32_t x1755 = x1754 * x1754; int32_t x1756 = 64 * x1755; int32_t x1757 = 64 * x1756; bool x1781 = x1754 >= 1; bool x1782; if (x1781) { x1782 = x1781; } else { x1782 = false; } int32_t x1787 = x1753 / 1; int32_t x1788 = x1787 + 1; int32_t x1792 = 16384 * x1788; int32_t x1793 = x1792 * x1788; int32_t x1789 = x1788 * x1788; int32_t x1790 = 256 * x1789; int32_t x1791 = 64 * x1790; bool x1810 = x1671 == 1; bool x1811 = x1671 == x1788; bool x1812 = x1810 || x1811; bool x1813; if (x1812) { x1813 = x1812; } else { x1813 = false; } bool x1828 = x1788 >= 1; bool x1829; if (x1828) { x1829 = x1828; } else { x1829 = false; } int32_t x1834 = x1787 / 1; int32_t x1835 = x1834 + 1; int32_t x1839 = 8192 * x1835; int32_t x1840 = x1839 * x1835; int32_t x1836 = x1835 * x1835; int32_t x1837 = 128 * x1836; int32_t x1838 = 64 * x1837; int32_t x1862 = x1835 + 2; bool x1863 = x1862 >= 3; bool x1864; if (x1863) { x1864 = x1863; } else { x1864 = false; } int32_t x1869 = x1862 - 3; int32_t x1870 = x1869 / 2; int32_t x1871 = x1870 + 1; int32_t x1875 = 8192 * x1871; int32_t x1876 = x1875 * x1871; int32_t x1872 = x1871 * x1871; int32_t x1873 = 128 * x1872; int32_t x1874 = 64 * x1873; bool x1898 = x1871 >= 1; bool x1899; if (x1898) { x1899 = x1898; } else { x1899 = false; } int32_t x1904 = x1870 / 1; int32_t x1905 = x1904 + 1; int32_t x1909 = 32768 
* x1905; int32_t x1910 = x1909 * x1905; int32_t x1906 = x1905 * x1905; int32_t x1907 = 512 * x1906; int32_t x1908 = 64 * x1907; int32_t x1930 = x1787 / 2; int32_t x1931 = x1930 + 1; int32_t x1935 = 32768 * x1931; int32_t x1936 = x1935 * x1931; int32_t x1932 = x1931 * x1931; int32_t x1933 = 512 * x1932; int32_t x1934 = 64 * x1933; bool x1953 = x1931 == 1; bool x1954 = x1931 == x1905; bool x1955 = x1953 || x1954; bool x1956; if (x1955) { x1956 = x1955; } else { x1956 = false; } bool x1971 = x1905 >= 1; bool x1972; if (x1971) { x1972 = x1971; } else { x1972 = false; } int32_t x1977 = x1904 / 1; int32_t x1978 = x1977 + 1; int32_t x1982 = 8192 * x1978; int32_t x1983 = x1982 * x1978; int32_t x1979 = x1978 * x1978; int32_t x1980 = 128 * x1979; int32_t x1981 = 64 * x1980; int32_t x2005 = x1978 + 2; bool x2006 = x2005 >= 3; bool x2007; if (x2006) { x2007 = x2006; } else { x2007 = false; } int32_t x2012 = x2005 - 3; int32_t x2013 = x2012 / 1; int32_t x2014 = x2013 + 1; int32_t x2018 = 8192 * x2014; int32_t x2019 = x2018 * x2014; int32_t x2015 = x2014 * x2014; int32_t x2016 = 128 * x2015; int32_t x2017 = 64 * x2016; bool x2041 = x2014 >= 1; bool x2042; if (x2041) { x2042 = x2041; } else { x2042 = false; } int32_t x2047 = x2013 / 1; int32_t x2048 = x2047 + 1; int32_t x2052 = 32768 * x2048; int32_t x2053 = x2052 * x2048; int32_t x2049 = x2048 * x2048; int32_t x2050 = 512 * x2049; int32_t x2051 = 64 * x2050; bool x2070 = x1905 == 1; bool x2071 = x1905 == x2048; bool x2072 = x2070 || x2071; bool x2073; if (x2072) { x2073 = x2072; } else { x2073 = false; } bool x2088 = x2048 >= 1; bool x2089; if (x2088) { x2089 = x2088; } else { x2089 = false; } int32_t x2094 = x2047 / 1; int32_t x2095 = x2094 + 1; int32_t x2099 = 8192 * x2095; int32_t x2100 = x2099 * x2095; int32_t x2096 = x2095 * x2095; int32_t x2097 = 128 * x2096; int32_t x2098 = 64 * x2097; int32_t x2122 = x2095 + 2; bool x2123 = x2122 >= 3; bool x2124; if (x2123) { x2124 = x2123; } else { x2124 = false; } int32_t x2129 = x2122 - 3; int32_t x2130 = x2129 / 1; int32_t x2131 = x2130 + 1; int32_t x2135 = 8192 * x2131; int32_t x2136 = x2135 * x2131; int32_t x2132 = x2131 * x2131; int32_t x2133 = 128 * x2132; int32_t x2134 = 64 * x2133; bool x2158 = x2131 >= 1; bool x2159; if (x2158) { x2159 = x2158; } else { x2159 = false; } int32_t x2164 = x2130 / 1; int32_t x2165 = x2164 + 1; int32_t x2169 = 32768 * x2165; int32_t x2170 = x2169 * x2165; int32_t x2166 = x2165 * x2165; int32_t x2167 = 512 * x2166; int32_t x2168 = 64 * x2167; bool x2187 = x2048 == 1; bool x2188 = x2048 == x2165; bool x2189 = x2187 || x2188; bool x2190; if (x2189) { x2190 = x2189; } else { x2190 = false; } bool x2205 = x2165 >= 1; bool x2206; if (x2205) { x2206 = x2205; } else { x2206 = false; } int32_t x2211 = x2164 / 1; int32_t x2212 = x2211 + 1; int32_t x2216 = 8192 * x2212; int32_t x2217 = x2216 * x2212; int32_t x2213 = x2212 * x2212; int32_t x2214 = 128 * x2213; int32_t x2215 = 64 * x2214; int32_t x2239 = x2212 + 2; bool x2240 = x2239 >= 3; bool x2241; if (x2240) { x2241 = x2240; } else { x2241 = false; } int32_t x2246 = x2239 - 3; int32_t x2247 = x2246 / 1; int32_t x2248 = x2247 + 1; int32_t x2252 = 8192 * x2248; int32_t x2253 = x2252 * x2248; int32_t x2249 = x2248 * x2248; int32_t x2250 = 128 * x2249; int32_t x2251 = 64 * x2250; bool x2275 = x2248 >= 1; bool x2276; if (x2275) { x2276 = x2275; } else { x2276 = false; } int32_t x2281 = x2247 / 1; int32_t x2282 = x2281 + 1; int32_t x2286 = 32768 * x2282; int32_t x2287 = x2286 * x2282; int32_t x2283 = x2282 * x2282; int32_t x2284 = 512 * 
x2283; int32_t x2285 = 64 * x2284; bool x2304 = x2165 == 1; bool x2305 = x2165 == x2282; bool x2306 = x2304 || x2305; bool x2307; if (x2306) { x2307 = x2306; } else { x2307 = false; } bool x2322 = x2282 >= 1; bool x2323; if (x2322) { x2323 = x2322; } else { x2323 = false; } int32_t x2328 = x2281 / 1; int32_t x2329 = x2328 + 1; int32_t x2333 = 16384 * x2329; int32_t x2334 = x2333 * x2329; int32_t x2330 = x2329 * x2329; int32_t x2331 = 256 * x2330; int32_t x2332 = 64 * x2331; int32_t x2356 = x2329 + 2; bool x2357 = x2356 >= 3; bool x2358; if (x2357) { x2358 = x2357; } else { x2358 = false; } int32_t x2363 = x2356 - 3; int32_t x2364 = x2363 / 2; int32_t x2365 = x2364 + 1; int32_t x2369 = 16384 * x2365; int32_t x2370 = x2369 * x2365; int32_t x2366 = x2365 * x2365; int32_t x2367 = 256 * x2366; int32_t x2368 = 64 * x2367; bool x2392 = x2365 >= 1; bool x2393; if (x2392) { x2393 = x2392; } else { x2393 = false; } int32_t x2398 = x2364 / 1; int32_t x2399 = x2398 + 1; int32_t x2403 = 65536 * x2399; int32_t x2404 = x2403 * x2399; int32_t x2400 = x2399 * x2399; int32_t x2401 = 1024 * x2400; int32_t x2402 = 64 * x2401; int32_t x2424 = x2281 / 2; int32_t x2425 = x2424 + 1; int32_t x2429 = 65536 * x2425; int32_t x2430 = x2429 * x2425; int32_t x2426 = x2425 * x2425; int32_t x2427 = 1024 * x2426; int32_t x2428 = 64 * x2427; bool x2447 = x2425 == 1; bool x2448 = x2425 == x2399; bool x2449 = x2447 || x2448; bool x2450; if (x2449) { x2450 = x2449; } else { x2450 = false; } bool x2465 = x2399 >= 1; bool x2466; if (x2465) { x2466 = x2465; } else { x2466 = false; } int32_t x2471 = x2398 / 1; int32_t x2472 = x2471 + 1; int32_t x2476 = 16384 * x2472; int32_t x2477 = x2476 * x2472; int32_t x2473 = x2472 * x2472; int32_t x2474 = 256 * x2473; int32_t x2475 = 64 * x2474; int32_t x2499 = x2472 + 2; bool x2500 = x2499 >= 3; bool x2501; if (x2500) { x2501 = x2500; } else { x2501 = false; } int32_t x2506 = x2499 - 3; int32_t x2507 = x2506 / 1; int32_t x2508 = x2507 + 1; int32_t x2512 = 16384 * x2508; int32_t x2513 = x2512 * x2508; int32_t x2509 = x2508 * x2508; int32_t x2510 = 256 * x2509; int32_t x2511 = 64 * x2510; bool x2535 = x2508 >= 1; bool x2536; if (x2535) { x2536 = x2535; } else { x2536 = false; } int32_t x2541 = x2507 / 1; int32_t x2542 = x2541 + 1; int32_t x2546 = 65536 * x2542; int32_t x2547 = x2546 * x2542; int32_t x2543 = x2542 * x2542; int32_t x2544 = 1024 * x2543; int32_t x2545 = 64 * x2544; bool x2564 = x2399 == 1; bool x2565 = x2399 == x2542; bool x2566 = x2564 || x2565; bool x2567; if (x2566) { x2567 = x2566; } else { x2567 = false; } bool x2582 = x2542 >= 1; bool x2583; if (x2582) { x2583 = x2582; } else { x2583 = false; } int32_t x2588 = x2541 / 1; int32_t x2589 = x2588 + 1; int32_t x2593 = 16384 * x2589; int32_t x2594 = x2593 * x2589; int32_t x2590 = x2589 * x2589; int32_t x2591 = 256 * x2590; int32_t x2592 = 64 * x2591; int32_t x2616 = x2589 + 2; bool x2617 = x2616 >= 3; bool x2618; if (x2617) { x2618 = x2617; } else { x2618 = false; } int32_t x2623 = x2616 - 3; int32_t x2624 = x2623 / 1; int32_t x2625 = x2624 + 1; int32_t x2629 = 16384 * x2625; int32_t x2630 = x2629 * x2625; int32_t x2626 = x2625 * x2625; int32_t x2627 = 256 * x2626; int32_t x2628 = 64 * x2627; bool x2652 = x2625 >= 1; bool x2653; if (x2652) { x2653 = x2652; } else { x2653 = false; } int32_t x2658 = x2624 / 1; int32_t x2659 = x2658 + 1; int32_t x2663 = 65536 * x2659; int32_t x2664 = x2663 * x2659; int32_t x2660 = x2659 * x2659; int32_t x2661 = 1024 * x2660; int32_t x2662 = 64 * x2661; bool x2681 = x2542 == 1; bool x2682 = x2542 == 
x2659; bool x2683 = x2681 || x2682; bool x2684; if (x2683) { x2684 = x2683; } else { x2684 = false; } bool x2699 = x2659 >= 1; bool x2700; if (x2699) { x2700 = x2699; } else { x2700 = false; } int32_t x2705 = x2658 / 1; int32_t x2706 = x2705 + 1; int32_t x2710 = 16384 * x2706; int32_t x2711 = x2710 * x2706; int32_t x2707 = x2706 * x2706; int32_t x2708 = 256 * x2707; int32_t x2709 = 64 * x2708; int32_t x2733 = x2706 + 2; bool x2734 = x2733 >= 3; bool x2735; if (x2734) { x2735 = x2734; } else { x2735 = false; } int32_t x2740 = x2733 - 3; int32_t x2741 = x2740 / 1; int32_t x2742 = x2741 + 1; int32_t x2746 = 16384 * x2742; int32_t x2747 = x2746 * x2742; int32_t x2743 = x2742 * x2742; int32_t x2744 = 256 * x2743; int32_t x2745 = 64 * x2744; bool x2769 = x2742 >= 1; bool x2770; if (x2769) { x2770 = x2769; } else { x2770 = false; } int32_t x2775 = x2741 / 1; int32_t x2776 = x2775 + 1; int32_t x2780 = 65536 * x2776; int32_t x2781 = x2780 * x2776; int32_t x2777 = x2776 * x2776; int32_t x2778 = 1024 * x2777; int32_t x2779 = 64 * x2778; bool x2798 = x2659 == 1; bool x2799 = x2659 == x2776; bool x2800 = x2798 || x2799; bool x2801; if (x2800) { x2801 = x2800; } else { x2801 = false; } bool x2816 = x2776 >= 1; bool x2817; if (x2816) { x2817 = x2816; } else { x2817 = false; } int32_t x2822 = x2775 / 1; int32_t x2823 = x2822 + 1; int32_t x2827 = 16384 * x2823; int32_t x2828 = x2827 * x2823; int32_t x2824 = x2823 * x2823; int32_t x2825 = 256 * x2824; int32_t x2826 = 64 * x2825; int32_t x2850 = x2823 + 2; bool x2851 = x2850 >= 3; bool x2852; if (x2851) { x2852 = x2851; } else { x2852 = false; } int32_t x2857 = x2850 - 3; int32_t x2858 = x2857 / 1; int32_t x2859 = x2858 + 1; int32_t x2863 = 16384 * x2859; int32_t x2864 = x2863 * x2859; int32_t x2860 = x2859 * x2859; int32_t x2861 = 256 * x2860; int32_t x2862 = 64 * x2861; bool x2886 = x2859 >= 1; bool x2887; if (x2886) { x2887 = x2886; } else { x2887 = false; } int32_t x2892 = x2858 / 1; int32_t x2893 = x2892 + 1; int32_t x2897 = 65536 * x2893; int32_t x2898 = x2897 * x2893; int32_t x2894 = x2893 * x2893; int32_t x2895 = 1024 * x2894; int32_t x2896 = 64 * x2895; bool x2915 = x2776 == 1; bool x2916 = x2776 == x2893; bool x2917 = x2915 || x2916; bool x2918; if (x2917) { x2918 = x2917; } else { x2918 = false; } bool x2933 = x2893 >= 1; bool x2934; if (x2933) { x2934 = x2933; } else { x2934 = false; } int32_t x2939 = x2892 / 1; int32_t x2940 = x2939 + 1; int32_t x2944 = 16384 * x2940; int32_t x2945 = x2944 * x2940; int32_t x2941 = x2940 * x2940; int32_t x2942 = 256 * x2941; int32_t x2943 = 64 * x2942; int32_t x2967 = x2940 + 2; bool x2968 = x2967 >= 3; bool x2969; if (x2968) { x2969 = x2968; } else { x2969 = false; } int32_t x2974 = x2967 - 3; int32_t x2975 = x2974 / 1; int32_t x2976 = x2975 + 1; int32_t x2980 = 16384 * x2976; int32_t x2981 = x2980 * x2976; int32_t x2977 = x2976 * x2976; int32_t x2978 = 256 * x2977; int32_t x2979 = 64 * x2978; bool x3003 = x2976 >= 1; bool x3004; if (x3003) { x3004 = x3003; } else { x3004 = false; } int32_t x3009 = x2975 / 1; int32_t x3010 = x3009 + 1; int32_t x3014 = 65536 * x3010; int32_t x3015 = x3014 * x3010; int32_t x3011 = x3010 * x3010; int32_t x3012 = 1024 * x3011; int32_t x3013 = 64 * x3012; bool x3032 = x2893 == 1; bool x3033 = x2893 == x3010; bool x3034 = x3032 || x3033; bool x3035; if (x3034) { x3035 = x3034; } else { x3035 = false; } bool x3050 = x3010 >= 1; bool x3051; if (x3050) { x3051 = x3050; } else { x3051 = false; } int32_t x3056 = x3009 / 1; int32_t x3057 = x3056 + 1; int32_t x3061 = 32768 * x3057; int32_t 
x3062 = x3061 * x3057; int32_t x3058 = x3057 * x3057; int32_t x3059 = 512 * x3058; int32_t x3060 = 64 * x3059; int32_t x3084 = x3057 + 2; bool x3085 = x3084 >= 3; bool x3086; if (x3085) { x3086 = x3085; } else { x3086 = false; } int32_t x3091 = x3084 - 3; int32_t x3092 = x3091 / 2; int32_t x3093 = x3092 + 1; int32_t x3097 = 32768 * x3093; int32_t x3098 = x3097 * x3093; int32_t x3094 = x3093 * x3093; int32_t x3095 = 512 * x3094; int32_t x3096 = 64 * x3095; bool x3120 = x3093 >= 1; bool x3121; if (x3120) { x3121 = x3120; } else { x3121 = false; } int32_t x3126 = x3092 / 1; int32_t x3127 = x3126 + 1; int32_t x3131 = 131072 * x3127; int32_t x3132 = x3131 * x3127; int32_t x3128 = x3127 * x3127; int32_t x3129 = 2048 * x3128; int32_t x3130 = 64 * x3129; int32_t x3152 = x3009 / 2; int32_t x3153 = x3152 + 1; int32_t x3157 = 131072 * x3153; int32_t x3158 = x3157 * x3153; int32_t x3154 = x3153 * x3153; int32_t x3155 = 2048 * x3154; int32_t x3156 = 64 * x3155; bool x3175 = x3153 == 1; bool x3176 = x3153 == x3127; bool x3177 = x3175 || x3176; bool x3178; if (x3177) { x3178 = x3177; } else { x3178 = false; } bool x3193 = x3127 >= 1; bool x3194; if (x3193) { x3194 = x3193; } else { x3194 = false; } int32_t x3199 = x3126 / 1; int32_t x3200 = x3199 + 1; int32_t x3204 = 32768 * x3200; int32_t x3205 = x3204 * x3200; int32_t x3201 = x3200 * x3200; int32_t x3202 = 512 * x3201; int32_t x3203 = 64 * x3202; int32_t x3227 = x3200 + 2; bool x3228 = x3227 >= 3; bool x3229; if (x3228) { x3229 = x3228; } else { x3229 = false; } int32_t x3234 = x3227 - 3; int32_t x3235 = x3234 / 1; int32_t x3236 = x3235 + 1; int32_t x3240 = 32768 * x3236; int32_t x3241 = x3240 * x3236; int32_t x3237 = x3236 * x3236; int32_t x3238 = 512 * x3237; int32_t x3239 = 64 * x3238; bool x3263 = x3236 >= 1; bool x3264; if (x3263) { x3264 = x3263; } else { x3264 = false; } int32_t x3269 = x3235 / 1; int32_t x3270 = x3269 + 1; int32_t x3274 = 131072 * x3270; int32_t x3275 = x3274 * x3270; int32_t x3271 = x3270 * x3270; int32_t x3272 = 2048 * x3271; int32_t x3273 = 64 * x3272; bool x3292 = x3127 == 1; bool x3293 = x3127 == x3270; bool x3294 = x3292 || x3293; bool x3295; if (x3294) { x3295 = x3294; } else { x3295 = false; } bool x3310 = x3270 >= 1; bool x3311; if (x3310) { x3311 = x3310; } else { x3311 = false; } int32_t x3316 = x3269 / 1; int32_t x3317 = x3316 + 1; int32_t x3321 = 32768 * x3317; int32_t x3322 = x3321 * x3317; int32_t x3318 = x3317 * x3317; int32_t x3319 = 512 * x3318; int32_t x3320 = 64 * x3319; int32_t x3344 = x3317 + 2; bool x3345 = x3344 >= 3; bool x3346; if (x3345) { x3346 = x3345; } else { x3346 = false; } int32_t x3351 = x3344 - 3; int32_t x3352 = x3351 / 1; int32_t x3353 = x3352 + 1; int32_t x3357 = 32768 * x3353; int32_t x3358 = x3357 * x3353; int32_t x3354 = x3353 * x3353; int32_t x3355 = 512 * x3354; int32_t x3356 = 64 * x3355; bool x3380 = x3353 >= 1; bool x3381; if (x3380) { x3381 = x3380; } else { x3381 = false; } int32_t x3386 = x3352 / 1; int32_t x3387 = x3386 + 1; int32_t x3391 = 131072 * x3387; int32_t x3392 = x3391 * x3387; int32_t x3388 = x3387 * x3387; int32_t x3389 = 2048 * x3388; int32_t x3390 = 64 * x3389; bool x3409 = x3270 == 1; bool x3410 = x3270 == x3387; bool x3411 = x3409 || x3410; bool x3412; if (x3411) { x3412 = x3411; } else { x3412 = false; } bool x3427 = x3387 >= 2; bool x3428; if (x3427) { x3428 = x3427; } else { x3428 = false; } int32_t x3437 = x3387 - 2; int32_t x3438 = x3437 / 1; int32_t x3439 = x3438 + 1; int32_t x3443 = 131072 * x3439; int32_t x3444 = x3443 * x3439; int32_t x3440 = x3439 * 
x3439; int32_t x3441 = 2048 * x3440; int32_t x3442 = 64 * x3441; bool x3700 = x3387 == x3270; bool x3701; if (x3700) { x3701 = x3700; } else { x3701 = false; } bool x3702 = x3387 == 1; bool x3703 = x3702 || x3700; bool x3704; if (x3703) { x3704 = x3703; } else { x3704 = false; } bool x3771 = x3270 == x3127; bool x3772; if (x3771) { x3772 = x3771; } else { x3772 = false; } bool x3773 = x3409 || x3771; bool x3774; if (x3773) { x3774 = x3773; } else { x3774 = false; } bool x3841 = x3127 == x3153; bool x3842; if (x3841) { x3842 = x3841; } else { x3842 = false; } bool x3843 = x3292 || x3841; bool x3844; if (x3843) { x3844 = x3843; } else { x3844 = false; } bool x3923 = x3010 == x2893; bool x3924; if (x3923) { x3924 = x3923; } else { x3924 = false; } bool x3925 = x3010 == 1; bool x3926 = x3925 || x3923; bool x3927; if (x3926) { x3927 = x3926; } else { x3927 = false; } bool x3994 = x2893 == x2776; bool x3995; if (x3994) { x3995 = x3994; } else { x3995 = false; } bool x3996 = x3032 || x3994; bool x3997; if (x3996) { x3997 = x3996; } else { x3997 = false; } bool x4064 = x2776 == x2659; bool x4065; if (x4064) { x4065 = x4064; } else { x4065 = false; } bool x4066 = x2915 || x4064; bool x4067; if (x4066) { x4067 = x4066; } else { x4067 = false; } bool x4134 = x2659 == x2542; bool x4135; if (x4134) { x4135 = x4134; } else { x4135 = false; } bool x4136 = x2798 || x4134; bool x4137; if (x4136) { x4137 = x4136; } else { x4137 = false; } bool x4204 = x2542 == x2399; bool x4205; if (x4204) { x4205 = x4204; } else { x4205 = false; } bool x4206 = x2681 || x4204; bool x4207; if (x4206) { x4207 = x4206; } else { x4207 = false; } bool x4274 = x2399 == x2425; bool x4275; if (x4274) { x4275 = x4274; } else { x4275 = false; } bool x4276 = x2564 || x4274; bool x4277; if (x4276) { x4277 = x4276; } else { x4277 = false; } bool x4356 = x2282 == x2165; bool x4357; if (x4356) { x4357 = x4356; } else { x4357 = false; } bool x4358 = x2282 == 1; bool x4359 = x4358 || x4356; bool x4360; if (x4359) { x4360 = x4359; } else { x4360 = false; } bool x4427 = x2165 == x2048; bool x4428; if (x4427) { x4428 = x4427; } else { x4428 = false; } bool x4429 = x2304 || x4427; bool x4430; if (x4429) { x4430 = x4429; } else { x4430 = false; } bool x4497 = x2048 == x1905; bool x4498; if (x4497) { x4498 = x4497; } else { x4498 = false; } bool x4499 = x2187 || x4497; bool x4500; if (x4499) { x4500 = x4499; } else { x4500 = false; } bool x4567 = x1905 == x1931; bool x4568; if (x4567) { x4568 = x4567; } else { x4568 = false; } bool x4569 = x2070 || x4567; bool x4570; if (x4569) { x4570 = x4569; } else { x4570 = false; } bool x4649 = x1788 == x1671; bool x4650; if (x4649) { x4650 = x4649; } else { x4650 = false; } bool x4651 = x1788 == 1; bool x4652 = x4651 || x4649; bool x4653; if (x4652) { x4653 = x4652; } else { x4653 = false; } bool x4720 = x1671 == x1531; bool x4721; if (x4720) { x4721 = x4720; } else { x4721 = false; } bool x4722 = x1810 || x4720; bool x4723; if (x4722) { x4723 = x4722; } else { x4723 = false; } bool x4790 = x1531 == x1461; bool x4791; if (x4790) { x4791 = x4790; } else { x4791 = false; } bool x4792 = x1693 || x4790; bool x4793; if (x4792) { x4793 = x4792; } else { x4793 = false; } int32_t x6494 = x1394 / 10; double x6499 = (double)x11; int64_t x6525 = (int64_t)x11; float x6529 = (float)x11; for(int x1386=0; x1386 < 4; x1386++) { struct timeval begin_1, end_1, diff_1; float x1388 = 0.0f; float x1389 = x1388; float x1390 = x1389; int32_t x1391 = x1386 + 1; printf("Start training epoch %d\n",x1391); gettimeofday(&begin_1, 
NULL); for(int x1396=0; x1396 < x1394; x1396++) { int32_t x1397 = x1396 * 64; int32_t x1398 = x1397 * 3072; float* x1399 = x13+x1398; int* x1400 = x14+x1397; // Tensor 'toGPU' invocation. float* x1402 = (float*)myGpuMalloc(196608 * sizeof(float)); CUDA_CALL(cudaMemcpy(x1402, x1399, 196608 * sizeof(float), cudaMemcpyHostToDevice)); float* x1404 = (float*)myGpuMalloc(2 * sizeof(float)); int* x1405 = (int32_t*)myGpuMalloc(64 * sizeof(int32_t)); CUDA_CALL(cudaMemcpy(x1405, x1400, 64 * sizeof(int32_t), cudaMemcpyHostToDevice)); float* x1407 = (float*)myGpuMalloc(1 * sizeof(float)); float* x1408 = (float*)myGpuMalloc(1 * sizeof(float)); // allocate memory to save the final loss in CPU Tensor float* x1410 = (float*)myGpuMalloc(1 * sizeof(float)); float* x1418 = (float*)myGpuMalloc(x1417 * sizeof(float)); float* x1419 = (float*)myMalloc(1 * sizeof(float));; x1419[0] = 0.0f; float* x1421 = (float*)myMalloc(1 * sizeof(float));; x1421[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 3, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
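// The call below runs the network's stem convolution for this mini-batch:
// x1402 holds the 64x3x32x32 input batch copied to the GPU above (3*32*32 =
// 3072 floats per image, matching the host offset arithmetic), x751 is the
// 64x3x3x3 filter bank (pad 1, stride 1), and the 64x64x32x32 result lands
// in x1418 (x1412 == 32). The forward algorithm and workspace were selected
// just above via CUDNN_CONVOLUTION_FWD_PREFER_FASTEST.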
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1421, in_desc, x1402, filt_desc, x751, conv_desc, algo, ws_data, ws_size, x1419, out_desc, x1418)); }; float* x1424 = (float*)myGpuMalloc(x1417 * sizeof(float)); float* x1425 = (float*)myGpuMalloc(x1415 * sizeof(float)); float* x1426 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1427 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1428 = (float*)myMalloc(1 * sizeof(float));; x1428[0] = 0.0f; float* x1430 = (float*)myMalloc(1 * sizeof(float));; x1430[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1430, x1428, in_desc, x1418, out_desc, x1425, sbmv_desc, x913, x1048, 0.1, x415, x625, 1.0E-5, x1426, x1427)); }; float* x1433 = (float*)myGpuMalloc(x1417 * sizeof(float)); float* x1434 = (float*)myMalloc(1 * sizeof(float));; x1434[0] = 0.0f; float* x1436 = (float*)myMalloc(1 * sizeof(float));; x1436[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1436, x_desc, x1425, x1434, x_desc, x1425)); }; float* x1439 = (float*)myMalloc(1 * sizeof(float));; x1439[0] = 0.0f; float* x1441 = (float*)myMalloc(1 * sizeof(float));; x1441[0] = 1.0f; float* x1451 = (float*)myGpuMalloc(x1450 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x1441, in_desc, x1425, x1439, out_desc, x1451)); }; float* x1453 = (float*)myGpuMalloc(x1450 * sizeof(float)); if (x1455) { } else { assert(false && "ERROR not specified"); } float* x1467 = (float*)myGpuMalloc(x1466 * sizeof(float)); float* x1468 = (float*)myMalloc(1 * sizeof(float));; x1468[0] = 0.0f; float* x1470 = (float*)myMalloc(1 * sizeof(float));; x1470[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnFilterDescriptor_t filt_desc; 
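// filt_desc below holds 64 kernels of shape 64x1x1: the 1x1 "reduce"
// convolution of the first bottleneck-style unit, applied to the 16x16
// feature maps left by the 2x2/stride-2 max pooling above (x1445 == 16).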
CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1470, in_desc, x1451, filt_desc, x994, conv_desc, algo, ws_data, ws_size, x1468, out_desc, x1467)); }; float* x1473 = (float*)myGpuMalloc(x1466 * sizeof(float)); float* x1474 = (float*)myGpuMalloc(x1464 * sizeof(float)); float* x1475 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1476 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1477 = (float*)myMalloc(1 * sizeof(float));; x1477[0] = 0.0f; float* x1479 = (float*)myMalloc(1 * sizeof(float));; x1479[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1479, x1477, in_desc, x1467, out_desc, x1474, sbmv_desc, x373, x454, 0.1, x637, x448, 1.0E-5, x1475, x1476)); }; float* x1482 = (float*)myGpuMalloc(x1466 * sizeof(float)); float* x1483 = (float*)myMalloc(1 * sizeof(float));; x1483[0] = 0.0f; float* x1485 = (float*)myMalloc(1 * sizeof(float));; x1485[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1485, x_desc, x1474, x1483, x_desc, x1474)); }; if (x1490) { } else { assert(false && "ERROR not specified"); } float* x1503 = (float*)myGpuMalloc(x1502 * sizeof(float)); float* x1504 = (float*)myMalloc(1 * sizeof(float));; x1504[0] = 0.0f; float* x1506 = (float*)myMalloc(1 * sizeof(float));; x1506[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); 
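// Next comes the 3x3 stage of the same unit: filt_desc is 64x64x3x3 with
// pad 1 and stride 1, so the spatial size stays at 16x16 (x1497 == x1461 ==
// 16); the convolution of x1474 with x565 is written to x1503.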
cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1506, in_desc, x1474, filt_desc, x565, conv_desc, algo, ws_data, ws_size, x1504, out_desc, x1503)); }; float* x1509 = (float*)myGpuMalloc(x1502 * sizeof(float)); float* x1510 = (float*)myGpuMalloc(x1500 * sizeof(float)); float* x1511 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1512 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1513 = (float*)myMalloc(1 * sizeof(float));; x1513[0] = 0.0f; float* x1515 = (float*)myMalloc(1 * sizeof(float));; x1515[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1515, x1513, in_desc, x1503, out_desc, x1510, sbmv_desc, x787, x442, 0.1, x610, x769, 1.0E-5, x1511, x1512)); }; float* x1518 = (float*)myGpuMalloc(x1502 * sizeof(float)); float* x1519 = (float*)myMalloc(1 * sizeof(float));; x1519[0] = 0.0f; float* x1521 = (float*)myMalloc(1 * sizeof(float));; x1521[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1521, x_desc, x1510, x1519, x_desc, x1510)); }; if (x1525) { } else { assert(false && "ERROR not specified"); } float* x1537 = (float*)myGpuMalloc(x1536 * sizeof(float)); float* x1538 = (float*)myMalloc(1 * sizeof(float));; x1538[0] = 0.0f; float* x1540 = (float*)myMalloc(1 * sizeof(float));; x1540[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1540, in_desc, x1510, filt_desc, x391, conv_desc, algo, ws_data, ws_size, x1538, out_desc, x1537)); }; float* x1543 = (float*)myGpuMalloc(x1536 * sizeof(float)); float* x1544 = (float*)myGpuMalloc(x1534 * sizeof(float)); float* x1545 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1546 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1547 = (float*)myMalloc(1 * sizeof(float));; x1547[0] = 0.0f; float* x1549 = (float*)myMalloc(1 * sizeof(float));; x1549[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1549, x1547, in_desc, x1537, out_desc, x1544, sbmv_desc, x892, x673, 0.1, x508, x403, 1.0E-5, x1545, x1546)); }; float* x1552 = (float*)myGpuMalloc(x1536 * sizeof(float)); if (x1455) { } else { assert(false && "ERROR not specified"); } float* x1560 = (float*)myGpuMalloc(x1559 * sizeof(float)); float* x1561 = (float*)myMalloc(1 * sizeof(float));; x1561[0] = 0.0f; float* x1563 = (float*)myMalloc(1 * sizeof(float));; x1563[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, 
CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1563, in_desc, x1451, filt_desc, x781, conv_desc, algo, ws_data, ws_size, x1561, out_desc, x1560)); }; float* x1566 = (float*)myGpuMalloc(x1559 * sizeof(float)); float* x1567 = (float*)myGpuMalloc(x1557 * sizeof(float)); float* x1568 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1569 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1570 = (float*)myMalloc(1 * sizeof(float));; x1570[0] = 0.0f; float* x1572 = (float*)myMalloc(1 * sizeof(float));; x1572[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1572, x1570, in_desc, x1560, out_desc, x1567, sbmv_desc, x523, x904, 0.1, x1087, x1024, 1.0E-5, x1568, x1569)); }; float* x1575 = (float*)myGpuMalloc(x1559 * sizeof(float)); if (x1579) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1461) x Sym(1461), res: x Const(64) x Const(256) x Sym(1531) x Sym(1531)"); } float* x1584 = (float*)myMalloc(1 * sizeof(float));; x1584[0] = 1.0f; float* x1586 = (float*)myMalloc(1 * sizeof(float));; x1586[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1584, bias_desc, x1567, x1586, out_desc, x1544)); }; float* x1589 = (float*)myMalloc(1 * sizeof(float));; x1589[0] = 0.0f; float* x1591 = (float*)myMalloc(1 * sizeof(float));; x1591[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1591, x_desc, x1544, x1589, x_desc, x1544)); }; if (x1595) { } else { assert(false && "ERROR not specified"); } float* x1607 = (float*)myGpuMalloc(x1606 * sizeof(float)); float* x1608 = (float*)myMalloc(1 * 
sizeof(float));; x1608[0] = 0.0f; float* x1610 = (float*)myMalloc(1 * sizeof(float));; x1610[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1610, in_desc, x1544, filt_desc, x808, conv_desc, algo, ws_data, ws_size, x1608, out_desc, x1607)); }; float* x1613 = (float*)myGpuMalloc(x1606 * sizeof(float)); float* x1614 = (float*)myGpuMalloc(x1604 * sizeof(float)); float* x1615 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1616 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1617 = (float*)myMalloc(1 * sizeof(float));; x1617[0] = 0.0f; float* x1619 = (float*)myMalloc(1 * sizeof(float));; x1619[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1619, x1617, in_desc, x1607, out_desc, x1614, sbmv_desc, x721, x475, 0.1, x325, x601, 1.0E-5, x1615, x1616)); }; float* x1622 = (float*)myGpuMalloc(x1606 * sizeof(float)); float* x1623 = (float*)myMalloc(1 * sizeof(float));; x1623[0] = 0.0f; float* x1625 = (float*)myMalloc(1 * sizeof(float));; x1625[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1625, x_desc, x1614, x1623, x_desc, x1614)); }; if (x1630) { } else { assert(false && "ERROR not specified"); } float* x1643 = (float*)myGpuMalloc(x1642 * sizeof(float)); 
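// Second bottleneck-style unit, 3x3 stage: the block below convolves the
// 64x64x16x16 activation x1614 with the 64x64x3x3 filter bank x544 (pad 1,
// stride 1) and stores the result in x1643, again 64x64x16x16 (x1637 == 16).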
float* x1644 = (float*)myMalloc(1 * sizeof(float));; x1644[0] = 0.0f; float* x1646 = (float*)myMalloc(1 * sizeof(float));; x1646[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1646, in_desc, x1614, filt_desc, x544, conv_desc, algo, ws_data, ws_size, x1644, out_desc, x1643)); }; float* x1649 = (float*)myGpuMalloc(x1642 * sizeof(float)); float* x1650 = (float*)myGpuMalloc(x1640 * sizeof(float)); float* x1651 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1652 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1653 = (float*)myMalloc(1 * sizeof(float));; x1653[0] = 0.0f; float* x1655 = (float*)myMalloc(1 * sizeof(float));; x1655[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1655, x1653, in_desc, x1643, out_desc, x1650, sbmv_desc, x919, x754, 0.1, x427, x1027, 1.0E-5, x1651, x1652)); }; float* x1658 = (float*)myGpuMalloc(x1642 * sizeof(float)); float* x1659 = (float*)myMalloc(1 * sizeof(float));; x1659[0] = 0.0f; float* x1661 = (float*)myMalloc(1 * sizeof(float));; x1661[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1661, x_desc, x1650, x1659, x_desc, x1650)); }; if (x1665) { } else { assert(false && "ERROR not specified"); } float* x1677 = 
(float*)myGpuMalloc(x1676 * sizeof(float)); float* x1678 = (float*)myMalloc(1 * sizeof(float));; x1678[0] = 0.0f; float* x1680 = (float*)myMalloc(1 * sizeof(float));; x1680[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1680, in_desc, x1650, filt_desc, x685, conv_desc, algo, ws_data, ws_size, x1678, out_desc, x1677)); }; float* x1683 = (float*)myGpuMalloc(x1676 * sizeof(float)); float* x1684 = (float*)myGpuMalloc(x1674 * sizeof(float)); float* x1685 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1686 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1687 = (float*)myMalloc(1 * sizeof(float));; x1687[0] = 0.0f; float* x1689 = (float*)myMalloc(1 * sizeof(float));; x1689[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1689, x1687, in_desc, x1677, out_desc, x1684, sbmv_desc, x469, x316, 0.1, x568, x793, 1.0E-5, x1685, x1686)); }; float* x1692 = (float*)myGpuMalloc(x1676 * sizeof(float)); if (x1696) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1531) x Sym(1531), res: x Const(64) x Const(256) x Sym(1671) x Sym(1671)"); } float* x1701 = (float*)myMalloc(1 * sizeof(float));; x1701[0] = 1.0f; float* x1703 = (float*)myMalloc(1 * sizeof(float));; x1703[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, 
CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1701, bias_desc, x1544, x1703, out_desc, x1684)); }; float* x1706 = (float*)myMalloc(1 * sizeof(float));; x1706[0] = 0.0f; float* x1708 = (float*)myMalloc(1 * sizeof(float));; x1708[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1708, x_desc, x1684, x1706, x_desc, x1684)); }; if (x1712) { } else { assert(false && "ERROR not specified"); } float* x1724 = (float*)myGpuMalloc(x1723 * sizeof(float)); float* x1725 = (float*)myMalloc(1 * sizeof(float));; x1725[0] = 0.0f; float* x1727 = (float*)myMalloc(1 * sizeof(float));; x1727[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1727, in_desc, x1684, filt_desc, x745, conv_desc, algo, ws_data, ws_size, x1725, out_desc, x1724)); }; float* x1730 = (float*)myGpuMalloc(x1723 * sizeof(float)); float* x1731 = (float*)myGpuMalloc(x1721 * sizeof(float)); float* x1732 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1733 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1734 = (float*)myMalloc(1 * sizeof(float));; x1734[0] = 0.0f; float* x1736 = (float*)myMalloc(1 * sizeof(float));; x1736[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1736, x1734, in_desc, x1724, out_desc, x1731, sbmv_desc, x538, x367, 0.1, x1066, x856, 1.0E-5, x1732, x1733)); }; float* x1739 = (float*)myGpuMalloc(x1723 * sizeof(float)); float* x1740 = (float*)myMalloc(1 * sizeof(float));; x1740[0] = 0.0f; float* x1742 = (float*)myMalloc(1 * sizeof(float));; x1742[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1742, x_desc, x1731, x1740, x_desc, x1731)); }; if (x1747) { } else { assert(false && "ERROR not specified"); } float* x1760 = (float*)myGpuMalloc(x1759 * sizeof(float)); float* x1761 = (float*)myMalloc(1 * sizeof(float));; x1761[0] = 0.0f; float* x1763 = (float*)myMalloc(1 * sizeof(float));; x1763[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1763, in_desc, x1731, filt_desc, x514, conv_desc, algo, ws_data, ws_size, x1761, out_desc, x1760)); }; float* x1766 = (float*)myGpuMalloc(x1759 * sizeof(float)); float* x1767 = (float*)myGpuMalloc(x1757 * sizeof(float)); float* x1768 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1769 = (float*)myGpuMalloc(64 * sizeof(float)); float* x1770 = (float*)myMalloc(1 * sizeof(float));; x1770[0] = 0.0f; float* x1772 = (float*)myMalloc(1 * sizeof(float));; x1772[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1772, x1770, in_desc, x1760, out_desc, x1767, sbmv_desc, x511, x700, 0.1, x832, x649, 1.0E-5, x1768, x1769)); }; float* x1775 = (float*)myGpuMalloc(x1759 * sizeof(float)); float* x1776 = (float*)myMalloc(1 * sizeof(float));; x1776[0] = 0.0f; float* x1778 = (float*)myMalloc(1 * sizeof(float));; x1778[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1778, x_desc, x1767, x1776, x_desc, x1767)); }; if (x1782) { } else { assert(false && "ERROR not specified"); } float* x1794 = (float*)myGpuMalloc(x1793 * sizeof(float)); float* x1795 = (float*)myMalloc(1 * sizeof(float));; x1795[0] = 0.0f; float* x1797 = (float*)myMalloc(1 * sizeof(float));; x1797[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1797, in_desc, x1767, filt_desc, x556, conv_desc, algo, ws_data, ws_size, x1795, out_desc, x1794)); }; float* x1800 = (float*)myGpuMalloc(x1793 * sizeof(float)); float* x1801 = (float*)myGpuMalloc(x1791 * sizeof(float)); float* x1802 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1803 = (float*)myGpuMalloc(256 * sizeof(float)); float* x1804 = (float*)myMalloc(1 * sizeof(float));; x1804[0] = 0.0f; float* x1806 = (float*)myMalloc(1 * sizeof(float));; x1806[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1806, x1804, in_desc, x1794, out_desc, x1801, sbmv_desc, x406, x1036, 0.1, x847, x694, 1.0E-5, x1802, x1803)); }; float* x1809 = (float*)myGpuMalloc(x1793 * sizeof(float)); if (x1813) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1671) x Sym(1671), res: x Const(64) x Const(256) x Sym(1788) x Sym(1788)"); } float* x1818 = (float*)myMalloc(1 * sizeof(float));; x1818[0] = 1.0f; float* x1820 = (float*)myMalloc(1 * sizeof(float));; x1820[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1818, bias_desc, x1684, x1820, out_desc, x1801)); }; float* x1823 = (float*)myMalloc(1 * sizeof(float));; x1823[0] = 0.0f; float* x1825 = (float*)myMalloc(1 * sizeof(float));; x1825[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1825, x_desc, x1801, x1823, x_desc, x1801)); }; if (x1829) { } else { assert(false && "ERROR not specified"); } float* x1841 = (float*)myGpuMalloc(x1840 * sizeof(float)); float* x1842 = (float*)myMalloc(1 * sizeof(float));; x1842[0] = 0.0f; float* x1844 = (float*)myMalloc(1 * sizeof(float));; x1844[0] = 1.0f; { 
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1844, in_desc, x1801, filt_desc, x328, conv_desc, algo, ws_data, ws_size, x1842, out_desc, x1841)); }; float* x1847 = (float*)myGpuMalloc(x1840 * sizeof(float)); float* x1848 = (float*)myGpuMalloc(x1838 * sizeof(float)); float* x1849 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1850 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1851 = (float*)myMalloc(1 * sizeof(float));; x1851[0] = 0.0f; float* x1853 = (float*)myMalloc(1 * sizeof(float));; x1853[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1853, x1851, in_desc, x1841, out_desc, x1848, sbmv_desc, x547, x811, 0.1, x907, x697, 1.0E-5, x1849, x1850)); }; float* x1856 = (float*)myGpuMalloc(x1840 * sizeof(float)); float* x1857 = (float*)myMalloc(1 * sizeof(float));; x1857[0] = 0.0f; float* x1859 = (float*)myMalloc(1 * sizeof(float));; x1859[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1859, x_desc, x1848, x1857, x_desc, x1848)); }; if (x1864) { } else { assert(false && "ERROR not specified"); } float* x1877 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1878 = (float*)myMalloc(1 * sizeof(float));; x1878[0] = 0.0f; float* x1880 = 
(float*)myMalloc(1 * sizeof(float));; x1880[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1880, in_desc, x1848, filt_desc, x376, conv_desc, algo, ws_data, ws_size, x1878, out_desc, x1877)); }; float* x1883 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1884 = (float*)myGpuMalloc(x1874 * sizeof(float)); float* x1885 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1886 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1887 = (float*)myMalloc(1 * sizeof(float));; x1887[0] = 0.0f; float* x1889 = (float*)myMalloc(1 * sizeof(float));; x1889[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1889, x1887, in_desc, x1877, out_desc, x1884, sbmv_desc, x1051, x865, 0.1, x679, x424, 1.0E-5, x1885, x1886)); }; float* x1892 = (float*)myGpuMalloc(x1876 * sizeof(float)); float* x1893 = (float*)myMalloc(1 * sizeof(float));; x1893[0] = 0.0f; float* x1895 = (float*)myMalloc(1 * sizeof(float));; x1895[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1895, x_desc, x1884, x1893, x_desc, x1884)); }; if (x1899) { } else { assert(false && "ERROR not specified"); } float* x1911 = (float*)myGpuMalloc(x1910 * sizeof(float)); float* x1912 = (float*)myMalloc(1 * 
sizeof(float));; x1912[0] = 0.0f; float* x1914 = (float*)myMalloc(1 * sizeof(float));; x1914[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1914, in_desc, x1884, filt_desc, x613, conv_desc, algo, ws_data, ws_size, x1912, out_desc, x1911)); }; float* x1917 = (float*)myGpuMalloc(x1910 * sizeof(float)); float* x1918 = (float*)myGpuMalloc(x1908 * sizeof(float)); float* x1919 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1920 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1921 = (float*)myMalloc(1 * sizeof(float));; x1921[0] = 0.0f; float* x1923 = (float*)myMalloc(1 * sizeof(float));; x1923[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1923, x1921, in_desc, x1911, out_desc, x1918, sbmv_desc, x730, x925, 0.1, x742, x598, 1.0E-5, x1919, x1920)); }; float* x1926 = (float*)myGpuMalloc(x1910 * sizeof(float)); if (x1829) { } else { assert(false && "ERROR not specified"); } float* x1937 = (float*)myGpuMalloc(x1936 * sizeof(float)); float* x1938 = (float*)myMalloc(1 * sizeof(float));; x1938[0] = 0.0f; float* x1940 = (float*)myMalloc(1 * sizeof(float));; x1940[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, 
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1940, in_desc, x1801, filt_desc, x1069, conv_desc, algo, ws_data, ws_size, x1938, out_desc, x1937)); }; float* x1943 = (float*)myGpuMalloc(x1936 * sizeof(float)); float* x1944 = (float*)myGpuMalloc(x1934 * sizeof(float)); float* x1945 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1946 = (float*)myGpuMalloc(512 * sizeof(float)); float* x1947 = (float*)myMalloc(1 * sizeof(float));; x1947[0] = 0.0f; float* x1949 = (float*)myMalloc(1 * sizeof(float));; x1949[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1949, x1947, in_desc, x1937, out_desc, x1944, sbmv_desc, x916, x652, 0.1, x421, x364, 1.0E-5, x1945, x1946)); }; float* x1952 = (float*)myGpuMalloc(x1936 * sizeof(float)); if (x1956) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1931) x Sym(1931), res: x Const(64) x Const(512) x Sym(1905) x Sym(1905)"); } float* x1961 = (float*)myMalloc(1 * sizeof(float));; x1961[0] = 1.0f; float* x1963 = (float*)myMalloc(1 * sizeof(float));; x1963[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1961, bias_desc, x1944, x1963, out_desc, x1918)); }; float* x1966 = (float*)myMalloc(1 * sizeof(float));; x1966[0] = 0.0f; float* x1968 = (float*)myMalloc(1 * sizeof(float));; x1968[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); 
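// The activation below runs in place: input and output share the same buffer (x1918) and the same
// descriptor, with blend factors alpha = 1.0f (x1968) and beta = 0.0f (x1966), so the ReLU simply
// overwrites the residual-summed tensor produced by the preceding cudnnAddTensor call.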
CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1968, x_desc, x1918, x1966, x_desc, x1918)); }; if (x1972) { } else { assert(false && "ERROR not specified"); } float* x1984 = (float*)myGpuMalloc(x1983 * sizeof(float)); float* x1985 = (float*)myMalloc(1 * sizeof(float));; x1985[0] = 0.0f; float* x1987 = (float*)myMalloc(1 * sizeof(float));; x1987[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1987, in_desc, x1918, filt_desc, x1063, conv_desc, algo, ws_data, ws_size, x1985, out_desc, x1984)); }; float* x1990 = (float*)myGpuMalloc(x1983 * sizeof(float)); float* x1991 = (float*)myGpuMalloc(x1981 * sizeof(float)); float* x1992 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1993 = (float*)myGpuMalloc(128 * sizeof(float)); float* x1994 = (float*)myMalloc(1 * sizeof(float));; x1994[0] = 0.0f; float* x1996 = (float*)myMalloc(1 * sizeof(float));; x1996[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x1996, x1994, in_desc, x1984, out_desc, x1991, sbmv_desc, x961, x346, 0.1, x595, x826, 1.0E-5, x1992, x1993)); }; float* x1999 = (float*)myGpuMalloc(x1983 * sizeof(float)); float* x2000 = (float*)myMalloc(1 * sizeof(float));; x2000[0] = 0.0f; float* x2002 = (float*)myMalloc(1 * sizeof(float));; x2002[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, 
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2002, x_desc, x1991, x2000, x_desc, x1991)); }; if (x2007) { } else { assert(false && "ERROR not specified"); } float* x2020 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2021 = (float*)myMalloc(1 * sizeof(float));; x2021[0] = 0.0f; float* x2023 = (float*)myMalloc(1 * sizeof(float));; x2023[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2023, in_desc, x1991, filt_desc, x1000, conv_desc, algo, ws_data, ws_size, x2021, out_desc, x2020)); }; float* x2026 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2027 = (float*)myGpuMalloc(x2017 * sizeof(float)); float* x2028 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2029 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2030 = (float*)myMalloc(1 * sizeof(float));; x2030[0] = 0.0f; float* x2032 = (float*)myMalloc(1 * sizeof(float));; x2032[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2032, x2030, in_desc, x2020, out_desc, x2027, sbmv_desc, x319, x580, 0.1, x400, x970, 1.0E-5, x2028, x2029)); }; float* x2035 = (float*)myGpuMalloc(x2019 * sizeof(float)); float* x2036 = (float*)myMalloc(1 * sizeof(float));; x2036[0] = 0.0f; float* x2038 = (float*)myMalloc(1 * sizeof(float));; x2038[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); 
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2038, x_desc, x2027, x2036, x_desc, x2027)); }; if (x2042) { } else { assert(false && "ERROR not specified"); } float* x2054 = (float*)myGpuMalloc(x2053 * sizeof(float)); float* x2055 = (float*)myMalloc(1 * sizeof(float));; x2055[0] = 0.0f; float* x2057 = (float*)myMalloc(1 * sizeof(float));; x2057[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
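// cudnnConvolutionForward computes y = alpha * conv(x, w) + beta * y; here alpha = 1.0f (x2057)
// and beta = 0.0f (x2055), so the freshly allocated output buffer x2054 is fully overwritten and
// does not need to be zero-initialized.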
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2057, in_desc, x2027, filt_desc, x628, conv_desc, algo, ws_data, ws_size, x2055, out_desc, x2054)); }; float* x2060 = (float*)myGpuMalloc(x2053 * sizeof(float)); float* x2061 = (float*)myGpuMalloc(x2051 * sizeof(float)); float* x2062 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2063 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2064 = (float*)myMalloc(1 * sizeof(float));; x2064[0] = 0.0f; float* x2066 = (float*)myMalloc(1 * sizeof(float));; x2066[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2066, x2064, in_desc, x2054, out_desc, x2061, sbmv_desc, x451, x1033, 0.1, x736, x559, 1.0E-5, x2062, x2063)); }; float* x2069 = (float*)myGpuMalloc(x2053 * sizeof(float)); if (x2073) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1905) x Sym(1905), res: x Const(64) x Const(512) x Sym(2048) x Sym(2048)"); } float* x2078 = (float*)myMalloc(1 * sizeof(float));; x2078[0] = 1.0f; float* x2080 = (float*)myMalloc(1 * sizeof(float));; x2080[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2078, bias_desc, x1918, x2080, out_desc, x2061)); }; float* x2083 = (float*)myMalloc(1 * sizeof(float));; x2083[0] = 0.0f; float* x2085 = (float*)myMalloc(1 * sizeof(float));; x2085[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2085, x_desc, x2061, x2083, x_desc, x2061)); }; if (x2089) { } else { assert(false && "ERROR not specified"); } float* x2101 = (float*)myGpuMalloc(x2100 * sizeof(float)); float* x2102 = (float*)myMalloc(1 * sizeof(float));; x2102[0] = 0.0f; float* x2104 = (float*)myMalloc(1 * sizeof(float));; x2104[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); 
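// Filter descriptors use (K, C, H, W) order with CUDNN_TENSOR_NCHW: the descriptor above describes
// 128 output channels, 512 input channels, and a 1x1 kernel, i.e. a channel-reducing pointwise
// convolution at the entry of the next block.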
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2104, in_desc, x2061, filt_desc, x883, conv_desc, algo, ws_data, ws_size, x2102, out_desc, x2101)); }; float* x2107 = (float*)myGpuMalloc(x2100 * sizeof(float)); float* x2108 = (float*)myGpuMalloc(x2098 * sizeof(float)); float* x2109 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2110 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2111 = (float*)myMalloc(1 * sizeof(float));; x2111[0] = 0.0f; float* x2113 = (float*)myMalloc(1 * sizeof(float));; x2113[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2113, x2111, in_desc, x2101, out_desc, x2108, sbmv_desc, x430, x805, 0.1, x631, x322, 1.0E-5, x2109, x2110)); }; float* x2116 = (float*)myGpuMalloc(x2100 * sizeof(float)); float* x2117 = (float*)myMalloc(1 * sizeof(float));; x2117[0] = 0.0f; float* x2119 = (float*)myMalloc(1 * sizeof(float));; x2119[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2119, x_desc, x2108, x2117, x_desc, x2108)); }; if (x2124) { } else { assert(false && "ERROR not specified"); } float* x2137 = (float*)myGpuMalloc(x2136 * sizeof(float)); float* x2138 = (float*)myMalloc(1 * sizeof(float));; x2138[0] = 0.0f; float* x2140 = (float*)myMalloc(1 * sizeof(float));; x2140[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2140, in_desc, x2108, filt_desc, x868, conv_desc, algo, ws_data, ws_size, x2138, out_desc, x2137)); }; float* x2143 = (float*)myGpuMalloc(x2136 * sizeof(float)); float* x2144 = (float*)myGpuMalloc(x2134 * sizeof(float)); float* x2145 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2146 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2147 = (float*)myMalloc(1 * sizeof(float));; x2147[0] = 0.0f; float* x2149 = (float*)myMalloc(1 * sizeof(float));; x2149[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2149, x2147, in_desc, x2137, out_desc, x2144, sbmv_desc, x676, x478, 0.1, x946, x1093, 1.0E-5, x2145, x2146)); }; float* x2152 = (float*)myGpuMalloc(x2136 * sizeof(float)); float* x2153 = (float*)myMalloc(1 * sizeof(float));; x2153[0] = 0.0f; float* x2155 = (float*)myMalloc(1 * sizeof(float));; x2155[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2155, x_desc, x2144, x2153, x_desc, x2144)); }; if (x2159) { } else { assert(false && "ERROR not specified"); } float* x2171 = (float*)myGpuMalloc(x2170 * sizeof(float)); float* x2172 = (float*)myMalloc(1 * sizeof(float));; x2172[0] = 0.0f; float* x2174 = (float*)myMalloc(1 * sizeof(float));; x2174[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2174, in_desc, x2144, filt_desc, x418, conv_desc, algo, ws_data, ws_size, x2172, out_desc, x2171)); }; float* x2177 = (float*)myGpuMalloc(x2170 * sizeof(float)); float* x2178 = (float*)myGpuMalloc(x2168 * sizeof(float)); float* x2179 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2180 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2181 = (float*)myMalloc(1 * sizeof(float));; x2181[0] = 0.0f; float* x2183 = (float*)myMalloc(1 * sizeof(float));; x2183[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2183, x2181, in_desc, x2171, out_desc, x2178, sbmv_desc, x796, x541, 0.1, x370, x964, 1.0E-5, x2179, x2180)); }; float* x2186 = (float*)myGpuMalloc(x2170 * sizeof(float)); if (x2190) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2048) x Sym(2048), res: x Const(64) x Const(512) x Sym(2165) x Sym(2165)"); } float* x2195 = (float*)myMalloc(1 * sizeof(float));; x2195[0] = 1.0f; float* x2197 = (float*)myMalloc(1 * sizeof(float));; x2197[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2195, bias_desc, x2061, x2197, out_desc, x2178)); }; float* x2200 = (float*)myMalloc(1 * sizeof(float));; x2200[0] = 0.0f; float* x2202 = (float*)myMalloc(1 * sizeof(float));; x2202[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); 
cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2202, x_desc, x2178, x2200, x_desc, x2178)); }; if (x2206) { } else { assert(false && "ERROR not specified"); } float* x2218 = (float*)myGpuMalloc(x2217 * sizeof(float)); float* x2219 = (float*)myMalloc(1 * sizeof(float));; x2219[0] = 0.0f; float* x2221 = (float*)myMalloc(1 * sizeof(float));; x2221[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2221, in_desc, x2178, filt_desc, x691, conv_desc, algo, ws_data, ws_size, x2219, out_desc, x2218)); }; float* x2224 = (float*)myGpuMalloc(x2217 * sizeof(float)); float* x2225 = (float*)myGpuMalloc(x2215 * sizeof(float)); float* x2226 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2227 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2228 = (float*)myMalloc(1 * sizeof(float));; x2228[0] = 0.0f; float* x2230 = (float*)myMalloc(1 * sizeof(float));; x2230[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2230, x2228, in_desc, x2218, out_desc, x2225, sbmv_desc, x412, x1021, 0.1, x1003, x1078, 1.0E-5, x2226, x2227)); }; float* x2233 = (float*)myGpuMalloc(x2217 * sizeof(float)); float* x2234 = (float*)myMalloc(1 * sizeof(float));; x2234[0] = 0.0f; float* x2236 = (float*)myMalloc(1 * sizeof(float));; x2236[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2236, x_desc, x2225, x2234, x_desc, x2225)); }; if (x2241) { } else { assert(false && "ERROR not specified"); } float* x2254 = (float*)myGpuMalloc(x2253 * sizeof(float)); float* x2255 = (float*)myMalloc(1 * sizeof(float));; x2255[0] = 0.0f; float* x2257 = (float*)myMalloc(1 * sizeof(float));; x2257[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
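// ws_data is requested from myGpuMalloc for every convolution and is never explicitly freed in this
// generated code; this assumes myGpuMalloc is an arena-style allocator that is reclaimed elsewhere,
// otherwise workspace memory would accumulate across the forward pass.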
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2257, in_desc, x2225, filt_desc, x790, conv_desc, algo, ws_data, ws_size, x2255, out_desc, x2254)); }; float* x2260 = (float*)myGpuMalloc(x2253 * sizeof(float)); float* x2261 = (float*)myGpuMalloc(x2251 * sizeof(float)); float* x2262 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2263 = (float*)myGpuMalloc(128 * sizeof(float)); float* x2264 = (float*)myMalloc(1 * sizeof(float));; x2264[0] = 0.0f; float* x2266 = (float*)myMalloc(1 * sizeof(float));; x2266[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2266, x2264, in_desc, x2254, out_desc, x2261, sbmv_desc, x532, x409, 0.1, x1099, x739, 1.0E-5, x2262, x2263)); }; float* x2269 = (float*)myGpuMalloc(x2253 * sizeof(float)); float* x2270 = (float*)myMalloc(1 * sizeof(float));; x2270[0] = 0.0f; float* x2272 = (float*)myMalloc(1 * sizeof(float));; x2272[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2272, x_desc, x2261, x2270, x_desc, x2261)); }; if (x2276) { } else { assert(false && "ERROR not specified"); } float* x2288 = (float*)myGpuMalloc(x2287 * sizeof(float)); float* x2289 = (float*)myMalloc(1 * sizeof(float));; x2289[0] = 0.0f; float* x2291 = (float*)myMalloc(1 * sizeof(float));; x2291[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
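// NOTE (added annotation): algorithm selection for the 1x1 expansion convolution back to 512 channels
// (weights x460); CUDNN_CONVOLUTION_FWD_PREFER_FASTEST asks the pre-cuDNN-8 heuristic API for the fastest
// forward algorithm, and the matching workspace is queried and allocated right after.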
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2291, in_desc, x2261, filt_desc, x460, conv_desc, algo, ws_data, ws_size, x2289, out_desc, x2288)); }; float* x2294 = (float*)myGpuMalloc(x2287 * sizeof(float)); float* x2295 = (float*)myGpuMalloc(x2285 * sizeof(float)); float* x2296 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2297 = (float*)myGpuMalloc(512 * sizeof(float)); float* x2298 = (float*)myMalloc(1 * sizeof(float));; x2298[0] = 0.0f; float* x2300 = (float*)myMalloc(1 * sizeof(float));; x2300[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2300, x2298, in_desc, x2288, out_desc, x2295, sbmv_desc, x763, x457, 0.1, x352, x997, 1.0E-5, x2296, x2297)); }; float* x2303 = (float*)myGpuMalloc(x2287 * sizeof(float)); if (x2307) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2165) x Sym(2165), res: x Const(64) x Const(512) x Sym(2282) x Sym(2282)"); } float* x2312 = (float*)myMalloc(1 * sizeof(float));; x2312[0] = 1.0f; float* x2314 = (float*)myMalloc(1 * sizeof(float));; x2314[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2312, bias_desc, x2178, x2314, out_desc, x2295)); }; float* x2317 = (float*)myMalloc(1 * sizeof(float));; x2317[0] = 0.0f; float* x2319 = (float*)myMalloc(1 * sizeof(float));; x2319[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2319, x_desc, x2295, x2317, x_desc, x2295)); }; if (x2323) { } else { assert(false && "ERROR not specified"); } float* x2335 = (float*)myGpuMalloc(x2334 * sizeof(float)); float* x2336 = (float*)myMalloc(1 * sizeof(float));; x2336[0] = 0.0f; float* x2338 = (float*)myMalloc(1 * sizeof(float));; x2338[0] = 1.0f; { 
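/* NOTE (added annotation): this block starts what appears to be the first bottleneck of the next stage:
   a 1x1 convolution taking the 64x512xHxW residual output x2295 down to 256 channels (weights x835),
   followed further below by a strided 3x3 convolution, a 1x1 expansion to 1024 channels, and a strided
   1x1 projection shortcut (weights x520).

   Every convolution in this file repeats the same cuDNN steps (descriptors -> algorithm -> workspace ->
   forward call). As a reading aid only, a hand-written wrapper with equivalent behavior could look
   roughly like the sketch below; convForward is a hypothetical helper, not something defined or used by
   this generated code:

     static void convForward(cudnnHandle_t h,
                             cudnnTensorDescriptor_t in_desc,   const float* in,
                             cudnnFilterDescriptor_t filt_desc, const float* filt,
                             cudnnConvolutionDescriptor_t conv_desc,
                             cudnnTensorDescriptor_t out_desc,  float* out) {
       float one = 1.0f, zero = 0.0f;
       // Pick the fastest forward algorithm (pre-cuDNN-8 heuristic API, as used in this file).
       cudnnConvolutionFwdAlgo_t algo;
       CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(
           h, in_desc, filt_desc, conv_desc, out_desc,
           CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
       // Query and allocate the workspace that algorithm needs.
       size_t ws_size;
       CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
           h, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size));
       void* ws_data = myGpuMalloc(ws_size);
       // out = 1 * conv(in, filt) + 0 * out
       CUDNN_CALL(cudnnConvolutionForward(
           h, &one, in_desc, in, filt_desc, filt, conv_desc, algo,
           ws_data, ws_size, &zero, out_desc, out));
     }

   The generated code inlines these steps instead and routes alpha/beta through small host buffers
   (e.g. x2338/x2336 in this block). */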
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2338, in_desc, x2295, filt_desc, x835, conv_desc, algo, ws_data, ws_size, x2336, out_desc, x2335)); }; float* x2341 = (float*)myGpuMalloc(x2334 * sizeof(float)); float* x2342 = (float*)myGpuMalloc(x2332 * sizeof(float)); float* x2343 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2344 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2345 = (float*)myMalloc(1 * sizeof(float));; x2345[0] = 0.0f; float* x2347 = (float*)myMalloc(1 * sizeof(float));; x2347[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2347, x2345, in_desc, x2335, out_desc, x2342, sbmv_desc, x1105, x358, 0.1, x688, x889, 1.0E-5, x2343, x2344)); }; float* x2350 = (float*)myGpuMalloc(x2334 * sizeof(float)); float* x2351 = (float*)myMalloc(1 * sizeof(float));; x2351[0] = 0.0f; float* x2353 = (float*)myMalloc(1 * sizeof(float));; x2353[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2353, x_desc, x2342, x2351, x_desc, x2342)); }; if (x2358) { } else { assert(false && "ERROR not specified"); } float* x2371 = (float*)myGpuMalloc(x2370 * sizeof(float)); float* x2372 = (float*)myMalloc(1 * sizeof(float));; x2372[0] = 0.0f; float* x2374 = 
(float*)myMalloc(1 * sizeof(float));; x2374[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2374, in_desc, x2342, filt_desc, x820, conv_desc, algo, ws_data, ws_size, x2372, out_desc, x2371)); }; float* x2377 = (float*)myGpuMalloc(x2370 * sizeof(float)); float* x2378 = (float*)myGpuMalloc(x2368 * sizeof(float)); float* x2379 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2380 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2381 = (float*)myMalloc(1 * sizeof(float));; x2381[0] = 0.0f; float* x2383 = (float*)myMalloc(1 * sizeof(float));; x2383[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2383, x2381, in_desc, x2371, out_desc, x2378, sbmv_desc, x619, x343, 0.1, x982, x592, 1.0E-5, x2379, x2380)); }; float* x2386 = (float*)myGpuMalloc(x2370 * sizeof(float)); float* x2387 = (float*)myMalloc(1 * sizeof(float));; x2387[0] = 0.0f; float* x2389 = (float*)myMalloc(1 * sizeof(float));; x2389[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2389, x_desc, x2378, x2387, x_desc, x2378)); }; if (x2393) { } else { assert(false && "ERROR not specified"); } float* x2405 = (float*)myGpuMalloc(x2404 * sizeof(float)); float* x2406 = (float*)myMalloc(1 * 
sizeof(float));; x2406[0] = 0.0f; float* x2408 = (float*)myMalloc(1 * sizeof(float));; x2408[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2408, in_desc, x2378, filt_desc, x1102, conv_desc, algo, ws_data, ws_size, x2406, out_desc, x2405)); }; float* x2411 = (float*)myGpuMalloc(x2404 * sizeof(float)); float* x2412 = (float*)myGpuMalloc(x2402 * sizeof(float)); float* x2413 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2414 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2415 = (float*)myMalloc(1 * sizeof(float));; x2415[0] = 0.0f; float* x2417 = (float*)myMalloc(1 * sizeof(float));; x2417[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2417, x2415, in_desc, x2405, out_desc, x2412, sbmv_desc, x349, x646, 0.1, x943, x1096, 1.0E-5, x2413, x2414)); }; float* x2420 = (float*)myGpuMalloc(x2404 * sizeof(float)); if (x2323) { } else { assert(false && "ERROR not specified"); } float* x2431 = (float*)myGpuMalloc(x2430 * sizeof(float)); float* x2432 = (float*)myMalloc(1 * sizeof(float));; x2432[0] = 0.0f; float* x2434 = (float*)myMalloc(1 * sizeof(float));; x2434[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( 
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2434, in_desc, x2295, filt_desc, x520, conv_desc, algo, ws_data, ws_size, x2432, out_desc, x2431)); }; float* x2437 = (float*)myGpuMalloc(x2430 * sizeof(float)); float* x2438 = (float*)myGpuMalloc(x2428 * sizeof(float)); float* x2439 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2440 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2441 = (float*)myMalloc(1 * sizeof(float));; x2441[0] = 0.0f; float* x2443 = (float*)myMalloc(1 * sizeof(float));; x2443[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2443, x2441, in_desc, x2431, out_desc, x2438, sbmv_desc, x382, x955, 0.1, x553, x928, 1.0E-5, x2439, x2440)); }; float* x2446 = (float*)myGpuMalloc(x2430 * sizeof(float)); if (x2450) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2425) x Sym(2425), res: x Const(64) x Const(1024) x Sym(2399) x Sym(2399)"); } float* x2455 = (float*)myMalloc(1 * sizeof(float));; x2455[0] = 1.0f; float* x2457 = (float*)myMalloc(1 * sizeof(float));; x2457[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2455, bias_desc, x2438, x2457, out_desc, x2412)); }; float* x2460 = (float*)myMalloc(1 * sizeof(float));; x2460[0] = 0.0f; float* x2462 = (float*)myMalloc(1 * sizeof(float));; x2462[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 
0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2462, x_desc, x2412, x2460, x_desc, x2412)); }; if (x2466) { } else { assert(false && "ERROR not specified"); } float* x2478 = (float*)myGpuMalloc(x2477 * sizeof(float)); float* x2479 = (float*)myMalloc(1 * sizeof(float));; x2479[0] = 0.0f; float* x2481 = (float*)myMalloc(1 * sizeof(float));; x2481[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2481, in_desc, x2412, filt_desc, x334, conv_desc, algo, ws_data, ws_size, x2479, out_desc, x2478)); }; float* x2484 = (float*)myGpuMalloc(x2477 * sizeof(float)); float* x2485 = (float*)myGpuMalloc(x2475 * sizeof(float)); float* x2486 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2487 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2488 = (float*)myMalloc(1 * sizeof(float));; x2488[0] = 0.0f; float* x2490 = (float*)myMalloc(1 * sizeof(float));; x2490[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2490, x2488, in_desc, x2478, out_desc, x2485, sbmv_desc, x385, x952, 0.1, x1072, x766, 1.0E-5, x2486, x2487)); }; float* x2493 = (float*)myGpuMalloc(x2477 * sizeof(float)); float* x2494 = (float*)myMalloc(1 * sizeof(float));; x2494[0] = 0.0f; float* x2496 = (float*)myMalloc(1 * sizeof(float));; x2496[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, 
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2496, x_desc, x2485, x2494, x_desc, x2485)); }; if (x2501) { } else { assert(false && "ERROR not specified"); } float* x2514 = (float*)myGpuMalloc(x2513 * sizeof(float)); float* x2515 = (float*)myMalloc(1 * sizeof(float));; x2515[0] = 0.0f; float* x2517 = (float*)myMalloc(1 * sizeof(float));; x2517[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2517, in_desc, x2485, filt_desc, x388, conv_desc, algo, ws_data, ws_size, x2515, out_desc, x2514)); }; float* x2520 = (float*)myGpuMalloc(x2513 * sizeof(float)); float* x2521 = (float*)myGpuMalloc(x2511 * sizeof(float)); float* x2522 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2523 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2524 = (float*)myMalloc(1 * sizeof(float));; x2524[0] = 0.0f; float* x2526 = (float*)myMalloc(1 * sizeof(float));; x2526[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2526, x2524, in_desc, x2514, out_desc, x2521, sbmv_desc, x1108, x583, 0.1, x895, x1006, 1.0E-5, x2522, x2523)); }; float* x2529 = (float*)myGpuMalloc(x2513 * sizeof(float)); float* x2530 = (float*)myMalloc(1 * sizeof(float));; x2530[0] = 0.0f; float* x2532 = (float*)myMalloc(1 * sizeof(float));; x2532[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); 
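// NOTE (added annotation): like every ReLU in this file, the activation below runs in place -- the
// batch-norm output x2521 is passed as both the input and the output tensor of cudnnActivationForward.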
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2532, x_desc, x2521, x2530, x_desc, x2521)); }; if (x2536) { } else { assert(false && "ERROR not specified"); } float* x2548 = (float*)myGpuMalloc(x2547 * sizeof(float)); float* x2549 = (float*)myMalloc(1 * sizeof(float));; x2549[0] = 0.0f; float* x2551 = (float*)myMalloc(1 * sizeof(float));; x2551[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
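// NOTE (added annotation): 1x1 expansion convolution of this bottleneck, taking 256 channels back up to
// 1024 (weights x463); batch norm, the residual add of x2412 via cudnnAddTensor, and an in-place ReLU
// follow below.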
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2551, in_desc, x2521, filt_desc, x463, conv_desc, algo, ws_data, ws_size, x2549, out_desc, x2548)); }; float* x2554 = (float*)myGpuMalloc(x2547 * sizeof(float)); float* x2555 = (float*)myGpuMalloc(x2545 * sizeof(float)); float* x2556 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2557 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2558 = (float*)myMalloc(1 * sizeof(float));; x2558[0] = 0.0f; float* x2560 = (float*)myMalloc(1 * sizeof(float));; x2560[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2560, x2558, in_desc, x2548, out_desc, x2555, sbmv_desc, x355, x991, 0.1, x841, x724, 1.0E-5, x2556, x2557)); }; float* x2563 = (float*)myGpuMalloc(x2547 * sizeof(float)); if (x2567) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2399) x Sym(2399), res: x Const(64) x Const(1024) x Sym(2542) x Sym(2542)"); } float* x2572 = (float*)myMalloc(1 * sizeof(float));; x2572[0] = 1.0f; float* x2574 = (float*)myMalloc(1 * sizeof(float));; x2574[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2572, bias_desc, x2412, x2574, out_desc, x2555)); }; float* x2577 = (float*)myMalloc(1 * sizeof(float));; x2577[0] = 0.0f; float* x2579 = (float*)myMalloc(1 * sizeof(float));; x2579[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2579, x_desc, x2555, x2577, x_desc, x2555)); }; if (x2583) { } else { assert(false && "ERROR not specified"); } float* x2595 = (float*)myGpuMalloc(x2594 * sizeof(float)); float* x2596 = (float*)myMalloc(1 * sizeof(float));; x2596[0] = 0.0f; float* x2598 = (float*)myMalloc(1 * sizeof(float));; x2598[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); 
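// NOTE (added annotation): next bottleneck block -- the 1x1 'reduce' convolution (weights x949) maps the
// 1024-channel residual output x2555 down to 256 channels.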
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2598, in_desc, x2555, filt_desc, x949, conv_desc, algo, ws_data, ws_size, x2596, out_desc, x2595)); }; float* x2601 = (float*)myGpuMalloc(x2594 * sizeof(float)); float* x2602 = (float*)myGpuMalloc(x2592 * sizeof(float)); float* x2603 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2604 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2605 = (float*)myMalloc(1 * sizeof(float));; x2605[0] = 0.0f; float* x2607 = (float*)myMalloc(1 * sizeof(float));; x2607[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2607, x2605, in_desc, x2595, out_desc, x2602, sbmv_desc, x682, x886, 0.1, x829, x817, 1.0E-5, x2603, x2604)); }; float* x2610 = (float*)myGpuMalloc(x2594 * sizeof(float)); float* x2611 = (float*)myMalloc(1 * sizeof(float));; x2611[0] = 0.0f; float* x2613 = (float*)myMalloc(1 * sizeof(float));; x2613[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2613, x_desc, x2602, x2611, x_desc, x2602)); }; if (x2618) { } else { assert(false && "ERROR not specified"); } float* x2631 = (float*)myGpuMalloc(x2630 * sizeof(float)); float* x2632 = (float*)myMalloc(1 * sizeof(float));; x2632[0] = 0.0f; float* x2634 = (float*)myMalloc(1 * sizeof(float));; x2634[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2634, in_desc, x2602, filt_desc, x337, conv_desc, algo, ws_data, ws_size, x2632, out_desc, x2631)); }; float* x2637 = (float*)myGpuMalloc(x2630 * sizeof(float)); float* x2638 = (float*)myGpuMalloc(x2628 * sizeof(float)); float* x2639 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2640 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2641 = (float*)myMalloc(1 * sizeof(float));; x2641[0] = 0.0f; float* x2643 = (float*)myMalloc(1 * sizeof(float));; x2643[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2643, x2641, in_desc, x2631, out_desc, x2638, sbmv_desc, x979, x871, 0.1, x667, x484, 1.0E-5, x2639, x2640)); }; float* x2646 = (float*)myGpuMalloc(x2630 * sizeof(float)); float* x2647 = (float*)myMalloc(1 * sizeof(float));; x2647[0] = 0.0f; float* x2649 = (float*)myMalloc(1 * sizeof(float));; x2649[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2649, x_desc, x2638, x2647, x_desc, x2638)); }; if (x2653) { } else { assert(false && "ERROR not specified"); } float* x2665 = (float*)myGpuMalloc(x2664 * sizeof(float)); float* x2666 = (float*)myMalloc(1 * sizeof(float));; x2666[0] = 0.0f; float* x2668 = (float*)myMalloc(1 * sizeof(float));; x2668[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
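// NOTE (added annotation): the filter set below is 1024x256x1x1 -- the expansion convolution (weights x643)
// whose batch-normalized output is summed with the previous block's output x2555 via cudnnAddTensor
// further down.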
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2668, in_desc, x2638, filt_desc, x643, conv_desc, algo, ws_data, ws_size, x2666, out_desc, x2665)); }; float* x2671 = (float*)myGpuMalloc(x2664 * sizeof(float)); float* x2672 = (float*)myGpuMalloc(x2662 * sizeof(float)); float* x2673 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2674 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2675 = (float*)myMalloc(1 * sizeof(float));; x2675[0] = 0.0f; float* x2677 = (float*)myMalloc(1 * sizeof(float));; x2677[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2677, x2675, in_desc, x2665, out_desc, x2672, sbmv_desc, x1084, x466, 0.1, x715, x859, 1.0E-5, x2673, x2674)); }; float* x2680 = (float*)myGpuMalloc(x2664 * sizeof(float)); if (x2684) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2542) x Sym(2542), res: x Const(64) x Const(1024) x Sym(2659) x Sym(2659)"); } float* x2689 = (float*)myMalloc(1 * sizeof(float));; x2689[0] = 1.0f; float* x2691 = (float*)myMalloc(1 * sizeof(float));; x2691[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2689, bias_desc, x2555, x2691, out_desc, x2672)); }; float* x2694 = (float*)myMalloc(1 * sizeof(float));; x2694[0] = 0.0f; float* x2696 = (float*)myMalloc(1 * sizeof(float));; x2696[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, 
x2659)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2696, x_desc, x2672, x2694, x_desc, x2672)); }; if (x2700) { } else { assert(false && "ERROR not specified"); } float* x2712 = (float*)myGpuMalloc(x2711 * sizeof(float)); float* x2713 = (float*)myMalloc(1 * sizeof(float));; x2713[0] = 0.0f; float* x2715 = (float*)myMalloc(1 * sizeof(float));; x2715[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
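// NOTE (added annotation): start of another bottleneck with an identity shortcut (the residual add further
// below reuses x2672 directly): this 1x1 convolution (weights x313) reduces the 1024-channel activation
// x2672 to 256 channels.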
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2715, in_desc, x2672, filt_desc, x313, conv_desc, algo, ws_data, ws_size, x2713, out_desc, x2712)); }; float* x2718 = (float*)myGpuMalloc(x2711 * sizeof(float)); float* x2719 = (float*)myGpuMalloc(x2709 * sizeof(float)); float* x2720 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2721 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2722 = (float*)myMalloc(1 * sizeof(float));; x2722[0] = 0.0f; float* x2724 = (float*)myMalloc(1 * sizeof(float));; x2724[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2724, x2722, in_desc, x2712, out_desc, x2719, sbmv_desc, x571, x1018, 0.1, x784, x589, 1.0E-5, x2720, x2721)); }; float* x2727 = (float*)myGpuMalloc(x2711 * sizeof(float)); float* x2728 = (float*)myMalloc(1 * sizeof(float));; x2728[0] = 0.0f; float* x2730 = (float*)myMalloc(1 * sizeof(float));; x2730[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2730, x_desc, x2719, x2728, x_desc, x2719)); }; if (x2735) { } else { assert(false && "ERROR not specified"); } float* x2748 = (float*)myGpuMalloc(x2747 * sizeof(float)); float* x2749 = (float*)myMalloc(1 * sizeof(float));; x2749[0] = 0.0f; float* x2751 = (float*)myMalloc(1 * sizeof(float));; x2751[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
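// NOTE (added annotation): workspace setup and forward call for the 3x3 convolution of this bottleneck
// (weights x1042, padding 1, stride 1), applied to the 256-channel tensor x2719 produced by the preceding
// reduce + batch norm + ReLU.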
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2751, in_desc, x2719, filt_desc, x1042, conv_desc, algo, ws_data, ws_size, x2749, out_desc, x2748)); }; float* x2754 = (float*)myGpuMalloc(x2747 * sizeof(float)); float* x2755 = (float*)myGpuMalloc(x2745 * sizeof(float)); float* x2756 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2757 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2758 = (float*)myMalloc(1 * sizeof(float));; x2758[0] = 0.0f; float* x2760 = (float*)myMalloc(1 * sizeof(float));; x2760[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2760, x2758, in_desc, x2748, out_desc, x2755, sbmv_desc, x517, x703, 0.1, x853, x985, 1.0E-5, x2756, x2757)); }; float* x2763 = (float*)myGpuMalloc(x2747 * sizeof(float)); float* x2764 = (float*)myMalloc(1 * sizeof(float));; x2764[0] = 0.0f; float* x2766 = (float*)myMalloc(1 * sizeof(float));; x2766[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2766, x_desc, x2755, x2764, x_desc, x2755)); }; if (x2770) { } else { assert(false && "ERROR not specified"); } float* x2782 = (float*)myGpuMalloc(x2781 * sizeof(float)); float* x2783 = (float*)myMalloc(1 * sizeof(float));; x2783[0] = 0.0f; float* x2785 = (float*)myMalloc(1 * sizeof(float));; x2785[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
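// NOTE (added annotation): algorithm selection for the 1x1 expansion back to 1024 channels (weights x562);
// as everywhere in this file, the fastest algorithm is requested and its workspace taken from myGpuMalloc.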
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2785, in_desc, x2755, filt_desc, x562, conv_desc, algo, ws_data, ws_size, x2783, out_desc, x2782)); }; float* x2788 = (float*)myGpuMalloc(x2781 * sizeof(float)); float* x2789 = (float*)myGpuMalloc(x2779 * sizeof(float)); float* x2790 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2791 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2792 = (float*)myMalloc(1 * sizeof(float));; x2792[0] = 0.0f; float* x2794 = (float*)myMalloc(1 * sizeof(float));; x2794[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2794, x2792, in_desc, x2782, out_desc, x2789, sbmv_desc, x1009, x733, 0.1, x988, x778, 1.0E-5, x2790, x2791)); }; float* x2797 = (float*)myGpuMalloc(x2781 * sizeof(float)); if (x2801) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2659) x Sym(2659), res: x Const(64) x Const(1024) x Sym(2776) x Sym(2776)"); } float* x2806 = (float*)myMalloc(1 * sizeof(float));; x2806[0] = 1.0f; float* x2808 = (float*)myMalloc(1 * sizeof(float));; x2808[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x2806, bias_desc, x2672, x2808, out_desc, x2789)); }; float* x2811 = (float*)myMalloc(1 * sizeof(float));; x2811[0] = 0.0f; float* x2813 = (float*)myMalloc(1 * sizeof(float));; x2813[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2813, x_desc, x2789, x2811, x_desc, x2789)); }; if (x2817) { } else { assert(false && "ERROR not specified"); } float* x2829 = (float*)myGpuMalloc(x2828 * sizeof(float)); float* x2830 = (float*)myMalloc(1 * sizeof(float));; x2830[0] = 0.0f; float* x2832 = (float*)myMalloc(1 * sizeof(float));; x2832[0] = 1.0f; { 
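// NOTE (added annotation): final bottleneck in this excerpt -- 1x1 reduce (weights x361) from the
// 1024-channel activation x2789, then a 3x3 convolution (weights x1081), a 1x1 expansion (weights x958)
// and batch norm; the closing cudnnAddTensor residual add is cut off at the end of the excerpt.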
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2832, in_desc, x2789, filt_desc, x361, conv_desc, algo, ws_data, ws_size, x2830, out_desc, x2829)); }; float* x2835 = (float*)myGpuMalloc(x2828 * sizeof(float)); float* x2836 = (float*)myGpuMalloc(x2826 * sizeof(float)); float* x2837 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2838 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2839 = (float*)myMalloc(1 * sizeof(float));; x2839[0] = 0.0f; float* x2841 = (float*)myMalloc(1 * sizeof(float));; x2841[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2841, x2839, in_desc, x2829, out_desc, x2836, sbmv_desc, x526, x850, 0.1, x1057, x502, 1.0E-5, x2837, x2838)); }; float* x2844 = (float*)myGpuMalloc(x2828 * sizeof(float)); float* x2845 = (float*)myMalloc(1 * sizeof(float));; x2845[0] = 0.0f; float* x2847 = (float*)myMalloc(1 * sizeof(float));; x2847[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2847, x_desc, x2836, x2845, x_desc, x2836)); }; if (x2852) { } else { assert(false && "ERROR not specified"); } float* x2865 = (float*)myGpuMalloc(x2864 * sizeof(float)); float* x2866 = (float*)myMalloc(1 * sizeof(float));; x2866[0] = 0.0f; float* x2868 = 
(float*)myMalloc(1 * sizeof(float));; x2868[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2868, in_desc, x2836, filt_desc, x1081, conv_desc, algo, ws_data, ws_size, x2866, out_desc, x2865)); }; float* x2871 = (float*)myGpuMalloc(x2864 * sizeof(float)); float* x2872 = (float*)myGpuMalloc(x2862 * sizeof(float)); float* x2873 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2874 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2875 = (float*)myMalloc(1 * sizeof(float));; x2875[0] = 0.0f; float* x2877 = (float*)myMalloc(1 * sizeof(float));; x2877[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2877, x2875, in_desc, x2865, out_desc, x2872, sbmv_desc, x799, x622, 0.1, x1045, x607, 1.0E-5, x2873, x2874)); }; float* x2880 = (float*)myGpuMalloc(x2864 * sizeof(float)); float* x2881 = (float*)myMalloc(1 * sizeof(float));; x2881[0] = 0.0f; float* x2883 = (float*)myMalloc(1 * sizeof(float));; x2883[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2883, x_desc, x2872, x2881, x_desc, x2872)); }; if (x2887) { } else { assert(false && "ERROR not specified"); } float* x2899 = (float*)myGpuMalloc(x2898 * sizeof(float)); float* x2900 = (float*)myMalloc(1 * 
sizeof(float));; x2900[0] = 0.0f; float* x2902 = (float*)myMalloc(1 * sizeof(float));; x2902[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2902, in_desc, x2872, filt_desc, x958, conv_desc, algo, ws_data, ws_size, x2900, out_desc, x2899)); }; float* x2905 = (float*)myGpuMalloc(x2898 * sizeof(float)); float* x2906 = (float*)myGpuMalloc(x2896 * sizeof(float)); float* x2907 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2908 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x2909 = (float*)myMalloc(1 * sizeof(float));; x2909[0] = 0.0f; float* x2911 = (float*)myMalloc(1 * sizeof(float));; x2911[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2911, x2909, in_desc, x2899, out_desc, x2906, sbmv_desc, x472, x655, 0.1, x922, x1111, 1.0E-5, x2907, x2908)); }; float* x2914 = (float*)myGpuMalloc(x2898 * sizeof(float)); if (x2918) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2776) x Sym(2776), res: x Const(64) x Const(1024) x Sym(2893) x Sym(2893)"); } float* x2923 = (float*)myMalloc(1 * sizeof(float));; x2923[0] = 1.0f; float* x2925 = (float*)myMalloc(1 * sizeof(float));; x2925[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); CUDNN_CALL(cudnnAddTensor( 
cudnnHandle, x2923, bias_desc, x2789, x2925, out_desc, x2906)); }; float* x2928 = (float*)myMalloc(1 * sizeof(float));; x2928[0] = 0.0f; float* x2930 = (float*)myMalloc(1 * sizeof(float));; x2930[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2930, x_desc, x2906, x2928, x_desc, x2906)); }; if (x2934) { } else { assert(false && "ERROR not specified"); } float* x2946 = (float*)myGpuMalloc(x2945 * sizeof(float)); float* x2947 = (float*)myMalloc(1 * sizeof(float));; x2947[0] = 0.0f; float* x2949 = (float*)myMalloc(1 * sizeof(float));; x2949[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2949, in_desc, x2906, filt_desc, x748, conv_desc, algo, ws_data, ws_size, x2947, out_desc, x2946)); }; float* x2952 = (float*)myGpuMalloc(x2945 * sizeof(float)); float* x2953 = (float*)myGpuMalloc(x2943 * sizeof(float)); float* x2954 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2955 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2956 = (float*)myMalloc(1 * sizeof(float));; x2956[0] = 0.0f; float* x2958 = (float*)myMalloc(1 * sizeof(float));; x2958[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2958, x2956, in_desc, x2946, out_desc, x2953, sbmv_desc, x550, x1054, 0.1, x535, x823, 1.0E-5, x2954, x2955)); }; float* x2961 = (float*)myGpuMalloc(x2945 * sizeof(float)); float* x2962 = (float*)myMalloc(1 * sizeof(float));; x2962[0] = 0.0f; float* x2964 = (float*)myMalloc(1 * sizeof(float));; x2964[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x2964, x_desc, x2953, x2962, x_desc, x2953)); }; if (x2969) { } else { assert(false && "ERROR not specified"); } float* x2982 = (float*)myGpuMalloc(x2981 * sizeof(float)); float* x2983 = (float*)myMalloc(1 * sizeof(float));; x2983[0] = 0.0f; float* x2985 = (float*)myMalloc(1 * sizeof(float));; x2985[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x2985, in_desc, x2953, filt_desc, x973, conv_desc, algo, ws_data, ws_size, x2983, out_desc, x2982)); }; float* x2988 = (float*)myGpuMalloc(x2981 * sizeof(float)); float* x2989 = (float*)myGpuMalloc(x2979 * sizeof(float)); float* x2990 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2991 = (float*)myGpuMalloc(256 * sizeof(float)); float* x2992 = (float*)myMalloc(1 * sizeof(float));; x2992[0] = 0.0f; float* x2994 = (float*)myMalloc(1 * sizeof(float));; x2994[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x2994, x2992, in_desc, x2982, out_desc, x2989, sbmv_desc, x718, x862, 0.1, x505, x1015, 1.0E-5, x2990, x2991)); }; float* x2997 = (float*)myGpuMalloc(x2981 * sizeof(float)); float* x2998 = (float*)myMalloc(1 * sizeof(float));; x2998[0] = 0.0f; float* x3000 = (float*)myMalloc(1 * sizeof(float));; x3000[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3000, x_desc, x2989, x2998, x_desc, x2989)); }; if (x3004) { } else { assert(false && "ERROR not specified"); } float* x3016 = (float*)myGpuMalloc(x3015 * sizeof(float)); float* x3017 = (float*)myMalloc(1 * sizeof(float));; x3017[0] = 0.0f; float* x3019 = (float*)myMalloc(1 * sizeof(float));; x3019[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3019, in_desc, x2989, filt_desc, x586, conv_desc, algo, ws_data, ws_size, x3017, out_desc, x3016)); }; float* x3022 = (float*)myGpuMalloc(x3015 * sizeof(float)); float* x3023 = (float*)myGpuMalloc(x3013 * sizeof(float)); float* x3024 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x3025 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x3026 = (float*)myMalloc(1 * sizeof(float));; x3026[0] = 0.0f; float* x3028 = (float*)myMalloc(1 * sizeof(float));; x3028[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3028, x3026, in_desc, x3016, out_desc, x3023, sbmv_desc, x1039, x574, 0.1, x661, x844, 1.0E-5, x3024, x3025)); }; float* x3031 = (float*)myGpuMalloc(x3015 * sizeof(float)); if (x3035) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2893) x Sym(2893), res: x Const(64) x Const(1024) x Sym(3010) x Sym(3010)"); } float* x3040 = (float*)myMalloc(1 * sizeof(float));; x3040[0] = 1.0f; float* x3042 = (float*)myMalloc(1 * sizeof(float));; x3042[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3040, bias_desc, x2906, x3042, out_desc, x3023)); }; float* x3045 = (float*)myMalloc(1 * sizeof(float));; x3045[0] = 0.0f; float* x3047 = (float*)myMalloc(1 * sizeof(float));; x3047[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3047, x_desc, x3023, x3045, x_desc, x3023)); }; if (x3051) { } else { assert(false && "ERROR not specified"); } float* x3063 = (float*)myGpuMalloc(x3062 * sizeof(float)); float* x3064 = (float*)myMalloc(1 * sizeof(float));; x3064[0] = 0.0f; float* x3066 = (float*)myMalloc(1 * sizeof(float));; x3066[0] = 1.0f; { 
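// Start of the next stage's first bottleneck: a 1x1 convolution reducing 1024 -> 512 channels; the 3x3 convolution that follows uses stride 2 to halve the spatial size, and a 1x1 projection shortcut (1024 -> 2048, stride 2) appears further below.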
cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3066, in_desc, x3023, filt_desc, x712, conv_desc, algo, ws_data, ws_size, x3064, out_desc, x3063)); }; float* x3069 = (float*)myGpuMalloc(x3062 * sizeof(float)); float* x3070 = (float*)myGpuMalloc(x3060 * sizeof(float)); float* x3071 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3072 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3073 = (float*)myMalloc(1 * sizeof(float));; x3073[0] = 0.0f; float* x3075 = (float*)myMalloc(1 * sizeof(float));; x3075[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3075, x3073, in_desc, x3063, out_desc, x3070, sbmv_desc, x898, x967, 0.1, x496, x658, 1.0E-5, x3071, x3072)); }; float* x3078 = (float*)myGpuMalloc(x3062 * sizeof(float)); float* x3079 = (float*)myMalloc(1 * sizeof(float));; x3079[0] = 0.0f; float* x3081 = (float*)myMalloc(1 * sizeof(float));; x3081[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3081, x_desc, x3070, x3079, x_desc, x3070)); }; if (x3086) { } else { assert(false && "ERROR not specified"); } float* x3099 = (float*)myGpuMalloc(x3098 * sizeof(float)); float* x3100 = (float*)myMalloc(1 * sizeof(float));; x3100[0] = 0.0f; float* x3102 = 
(float*)myMalloc(1 * sizeof(float));; x3102[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3102, in_desc, x3070, filt_desc, x397, conv_desc, algo, ws_data, ws_size, x3100, out_desc, x3099)); }; float* x3105 = (float*)myGpuMalloc(x3098 * sizeof(float)); float* x3106 = (float*)myGpuMalloc(x3096 * sizeof(float)); float* x3107 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3108 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3109 = (float*)myMalloc(1 * sizeof(float));; x3109[0] = 0.0f; float* x3111 = (float*)myMalloc(1 * sizeof(float));; x3111[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3111, x3109, in_desc, x3099, out_desc, x3106, sbmv_desc, x910, x772, 0.1, x634, x445, 1.0E-5, x3107, x3108)); }; float* x3114 = (float*)myGpuMalloc(x3098 * sizeof(float)); float* x3115 = (float*)myMalloc(1 * sizeof(float));; x3115[0] = 0.0f; float* x3117 = (float*)myMalloc(1 * sizeof(float));; x3117[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3117, x_desc, x3106, x3115, x_desc, x3106)); }; if (x3121) { } else { assert(false && "ERROR not specified"); } float* x3133 = (float*)myGpuMalloc(x3132 * sizeof(float)); float* x3134 = (float*)myMalloc(1 * 
sizeof(float));; x3134[0] = 0.0f; float* x3136 = (float*)myMalloc(1 * sizeof(float));; x3136[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3136, in_desc, x3106, filt_desc, x931, conv_desc, algo, ws_data, ws_size, x3134, out_desc, x3133)); }; float* x3139 = (float*)myGpuMalloc(x3132 * sizeof(float)); float* x3140 = (float*)myGpuMalloc(x3130 * sizeof(float)); float* x3141 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3142 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3143 = (float*)myMalloc(1 * sizeof(float));; x3143[0] = 0.0f; float* x3145 = (float*)myMalloc(1 * sizeof(float));; x3145[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3145, x3143, in_desc, x3133, out_desc, x3140, sbmv_desc, x1012, x481, 0.1, x640, x874, 1.0E-5, x3141, x3142)); }; float* x3148 = (float*)myGpuMalloc(x3132 * sizeof(float)); if (x3051) { } else { assert(false && "ERROR not specified"); } float* x3159 = (float*)myGpuMalloc(x3158 * sizeof(float)); float* x3160 = (float*)myMalloc(1 * sizeof(float));; x3160[0] = 0.0f; float* x3162 = (float*)myMalloc(1 * sizeof(float));; x3162[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( 
out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3162, in_desc, x3023, filt_desc, x937, conv_desc, algo, ws_data, ws_size, x3160, out_desc, x3159)); }; float* x3165 = (float*)myGpuMalloc(x3158 * sizeof(float)); float* x3166 = (float*)myGpuMalloc(x3156 * sizeof(float)); float* x3167 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3168 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3169 = (float*)myMalloc(1 * sizeof(float));; x3169[0] = 0.0f; float* x3171 = (float*)myMalloc(1 * sizeof(float));; x3171[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3171, x3169, in_desc, x3159, out_desc, x3166, sbmv_desc, x814, x616, 0.1, x487, x670, 1.0E-5, x3167, x3168)); }; float* x3174 = (float*)myGpuMalloc(x3158 * sizeof(float)); if (x3178) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3153) x Sym(3153), res: x Const(64) x Const(2048) x Sym(3127) x Sym(3127)"); } float* x3183 = (float*)myMalloc(1 * sizeof(float));; x3183[0] = 1.0f; float* x3185 = (float*)myMalloc(1 * sizeof(float));; x3185[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3183, bias_desc, x3166, x3185, out_desc, x3140)); }; float* x3188 = (float*)myMalloc(1 * sizeof(float));; x3188[0] = 0.0f; float* x3190 = (float*)myMalloc(1 * sizeof(float));; x3190[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 
0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3190, x_desc, x3140, x3188, x_desc, x3140)); }; if (x3194) { } else { assert(false && "ERROR not specified"); } float* x3206 = (float*)myGpuMalloc(x3205 * sizeof(float)); float* x3207 = (float*)myMalloc(1 * sizeof(float));; x3207[0] = 0.0f; float* x3209 = (float*)myMalloc(1 * sizeof(float));; x3209[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3209, in_desc, x3140, filt_desc, x940, conv_desc, algo, ws_data, ws_size, x3207, out_desc, x3206)); }; float* x3212 = (float*)myGpuMalloc(x3205 * sizeof(float)); float* x3213 = (float*)myGpuMalloc(x3203 * sizeof(float)); float* x3214 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3215 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3216 = (float*)myMalloc(1 * sizeof(float));; x3216[0] = 0.0f; float* x3218 = (float*)myMalloc(1 * sizeof(float));; x3218[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3218, x3216, in_desc, x3206, out_desc, x3213, sbmv_desc, x433, x706, 0.1, x757, x490, 1.0E-5, x3214, x3215)); }; float* x3221 = (float*)myGpuMalloc(x3205 * sizeof(float)); float* x3222 = (float*)myMalloc(1 * sizeof(float));; x3222[0] = 0.0f; float* x3224 = (float*)myMalloc(1 * sizeof(float));; x3224[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, 
/*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3224, x_desc, x3213, x3222, x_desc, x3213)); }; if (x3229) { } else { assert(false && "ERROR not specified"); } float* x3242 = (float*)myGpuMalloc(x3241 * sizeof(float)); float* x3243 = (float*)myMalloc(1 * sizeof(float));; x3243[0] = 0.0f; float* x3245 = (float*)myMalloc(1 * sizeof(float));; x3245[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3245, in_desc, x3213, filt_desc, x760, conv_desc, algo, ws_data, ws_size, x3243, out_desc, x3242)); }; float* x3248 = (float*)myGpuMalloc(x3241 * sizeof(float)); float* x3249 = (float*)myGpuMalloc(x3239 * sizeof(float)); float* x3250 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3251 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3252 = (float*)myMalloc(1 * sizeof(float));; x3252[0] = 0.0f; float* x3254 = (float*)myMalloc(1 * sizeof(float));; x3254[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3254, x3252, in_desc, x3242, out_desc, x3249, sbmv_desc, x775, x493, 0.1, x709, x880, 1.0E-5, x3250, x3251)); }; float* x3257 = (float*)myGpuMalloc(x3241 * sizeof(float)); float* x3258 = (float*)myMalloc(1 * sizeof(float));; x3258[0] = 0.0f; float* x3260 = (float*)myMalloc(1 * sizeof(float));; x3260[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); 
CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3260, x_desc, x3249, x3258, x_desc, x3249)); }; if (x3264) { } else { assert(false && "ERROR not specified"); } float* x3276 = (float*)myGpuMalloc(x3275 * sizeof(float)); float* x3277 = (float*)myMalloc(1 * sizeof(float));; x3277[0] = 0.0f; float* x3279 = (float*)myMalloc(1 * sizeof(float));; x3279[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3279, in_desc, x3249, filt_desc, x436, conv_desc, algo, ws_data, ws_size, x3277, out_desc, x3276)); }; float* x3282 = (float*)myGpuMalloc(x3275 * sizeof(float)); float* x3283 = (float*)myGpuMalloc(x3273 * sizeof(float)); float* x3284 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3285 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3286 = (float*)myMalloc(1 * sizeof(float));; x3286[0] = 0.0f; float* x3288 = (float*)myMalloc(1 * sizeof(float));; x3288[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3288, x3286, in_desc, x3276, out_desc, x3283, sbmv_desc, x577, x727, 0.1, x499, x1030, 1.0E-5, x3284, x3285)); }; float* x3291 = (float*)myGpuMalloc(x3275 * sizeof(float)); if (x3295) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3127) x Sym(3127), res: x Const(64) x Const(2048) x Sym(3270) x Sym(3270)"); } float* x3300 = (float*)myMalloc(1 * sizeof(float));; x3300[0] = 1.0f; float* x3302 = (float*)myMalloc(1 * sizeof(float));; x3302[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3300, bias_desc, x3140, x3302, out_desc, x3283)); }; float* x3305 = (float*)myMalloc(1 * sizeof(float));; x3305[0] = 0.0f; float* x3307 = (float*)myMalloc(1 * sizeof(float));; x3307[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3307, x_desc, x3283, x3305, x_desc, x3283)); }; if (x3311) { } else { assert(false && "ERROR not specified"); } float* x3323 = (float*)myGpuMalloc(x3322 * sizeof(float)); float* x3324 = (float*)myMalloc(1 * sizeof(float));; x3324[0] = 0.0f; float* x3326 = (float*)myMalloc(1 * sizeof(float));; x3326[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); 
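// Output tensor and convolution descriptors for this 1x1 convolution (2048 -> 512 channels, no padding, stride 1), followed by algorithm selection, workspace allocation, and the forward call.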
cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3326, in_desc, x3283, filt_desc, x1090, conv_desc, algo, ws_data, ws_size, x3324, out_desc, x3323)); }; float* x3329 = (float*)myGpuMalloc(x3322 * sizeof(float)); float* x3330 = (float*)myGpuMalloc(x3320 * sizeof(float)); float* x3331 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3332 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3333 = (float*)myMalloc(1 * sizeof(float));; x3333[0] = 0.0f; float* x3335 = (float*)myMalloc(1 * sizeof(float));; x3335[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3335, x3333, in_desc, x3323, out_desc, x3330, sbmv_desc, x340, x529, 0.1, x934, x1060, 1.0E-5, x3331, x3332)); }; float* x3338 = (float*)myGpuMalloc(x3322 * sizeof(float)); float* x3339 = (float*)myMalloc(1 * sizeof(float));; x3339[0] = 0.0f; float* x3341 = (float*)myMalloc(1 * sizeof(float));; x3341[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3341, x_desc, x3330, x3339, x_desc, x3330)); }; if (x3346) { } else { assert(false && "ERROR not specified"); } float* x3359 = (float*)myGpuMalloc(x3358 * sizeof(float)); float* x3360 = (float*)myMalloc(1 * sizeof(float));; x3360[0] = 0.0f; float* x3362 = (float*)myMalloc(1 * sizeof(float));; x3362[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, 
CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3362, in_desc, x3330, filt_desc, x379, conv_desc, algo, ws_data, ws_size, x3360, out_desc, x3359)); }; float* x3365 = (float*)myGpuMalloc(x3358 * sizeof(float)); float* x3366 = (float*)myGpuMalloc(x3356 * sizeof(float)); float* x3367 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3368 = (float*)myGpuMalloc(512 * sizeof(float)); float* x3369 = (float*)myMalloc(1 * sizeof(float));; x3369[0] = 0.0f; float* x3371 = (float*)myMalloc(1 * sizeof(float));; x3371[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3371, x3369, in_desc, x3359, out_desc, x3366, sbmv_desc, x877, x802, 0.1, x331, x901, 1.0E-5, x3367, x3368)); }; float* x3374 = (float*)myGpuMalloc(x3358 * sizeof(float)); float* x3375 = (float*)myMalloc(1 * sizeof(float));; x3375[0] = 0.0f; float* x3377 = (float*)myMalloc(1 * sizeof(float));; x3377[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3377, x_desc, x3366, x3375, x_desc, x3366)); }; if (x3381) { } else { assert(false && "ERROR not specified"); } float* x3393 = (float*)myGpuMalloc(x3392 * sizeof(float)); float* x3394 = (float*)myMalloc(1 * sizeof(float));; x3394[0] = 0.0f; float* x3396 = (float*)myMalloc(1 * sizeof(float));; x3396[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); 
CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x3396, in_desc, x3366, filt_desc, x394, conv_desc, algo, ws_data, ws_size, x3394, out_desc, x3393)); }; float* x3399 = (float*)myGpuMalloc(x3392 * sizeof(float)); float* x3400 = (float*)myGpuMalloc(x3390 * sizeof(float)); float* x3401 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3402 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x3403 = (float*)myMalloc(1 * sizeof(float));; x3403[0] = 0.0f; float* x3405 = (float*)myMalloc(1 * sizeof(float));; x3405[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3405, x3403, in_desc, x3393, out_desc, x3400, sbmv_desc, x604, x838, 0.1, x1075, x664, 1.0E-5, x3401, x3402)); }; float* x3408 = (float*)myGpuMalloc(x3392 * sizeof(float)); if (x3412) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3270) x Sym(3270), res: x Const(64) x Const(2048) x Sym(3387) x Sym(3387)"); } float* x3417 = (float*)myMalloc(1 * sizeof(float));; x3417[0] = 1.0f; float* x3419 = (float*)myMalloc(1 * sizeof(float));; x3419[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3417, bias_desc, x3283, x3419, out_desc, x3400)); }; float* x3422 = (float*)myMalloc(1 * sizeof(float));; x3422[0] = 0.0f; float* x3424 = (float*)myMalloc(1 * sizeof(float));; x3424[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, 
x3387)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x3424, x_desc, x3400, x3422, x_desc, x3400)); }; if (x3428) { } else { assert(false && "Image too small for averagePool_batch: x Const(64) x Const(2048) x Sym(3387) x Sym(3387)|(2,2)"); } float* x3433 = (float*)myMalloc(1 * sizeof(float));; x3433[0] = 0.0f; float* x3435 = (float*)myMalloc(1 * sizeof(float));; x3435[0] = 1.0f; float* x3445 = (float*)myGpuMalloc(x3444 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3439, x3439)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 1, 1 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x3435, in_desc, x3400, x3433, out_desc, x3445)); }; float* x3447 = (float*)myGpuMalloc(x3444 * sizeof(float)); int32_t x3448 = 0; int32_t x3449 = 1; x3449 *= 64; x3448 += 1; int32_t x3452 = x3448; bool x3453 = x3452 >= 2; if (x3453) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3459 = x3452 == 0; if (x3459) { int32_t x3460 = x3449; bool x3461 = x3460 == x3442; if (x3461) { } else { assert(false && "must same size!!"); } } else { } int32_t x3468 = x3449; // foward of gemm // gemm: List(Const(64), Sym(3469)), Vector(Const(10), Const(2048)) float* x3473 = (float*)myGpuMalloc(640 * sizeof(float)); float* x3474 = (float*)myMalloc(1 * sizeof(float));; x3474[0] = 0.0f; float* x3476 = (float*)myMalloc(1 * sizeof(float));; x3476[0] = 1.0f; CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, 10,64,2048,x3476,x976,2048,x3445,2048,x3474,x3473,10)); float* x3479 = (float*)myGpuMalloc(640 * sizeof(float)); float* x3480 = (float*)myMalloc(1 * sizeof(float));; x3480[0] = 1.0f; float* x3482 = (float*)myMalloc(1 * sizeof(float));; x3482[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3480, bias_desc, x439, x3482, out_desc, x3473)); }; int32_t x3485 = 0; int32_t x3486 = 1; x3486 *= 64; x3486 *= 10; x3486 *= 1; x3486 *= 1; int32_t x3491 = x3485; bool x3492 = x3491 >= 2; if (x3492) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3497 = x3491 == 0; if (x3497) { int32_t x3498 = x3486; bool x3499 = x3498 == 640; if (x3499) { } else { assert(false && "must same size!!"); } } else { } float* x3506 = (float*)myMalloc(1 * sizeof(float));; x3506[0] = 0.0f; float* x3508 = (float*)myMalloc(1 * sizeof(float));; x3508[0] = 1.0f; float* x3510 = (float*)myGpuMalloc(640 * sizeof(float)); { 
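// Classifier head. Above: a 2x2 average pool reduces the 2048-channel feature map, the result
// x3445 is viewed as 64 x 2048 (the gemm assumes the pooled map is 1x1 per channel), and
// cublasSgemm computes the class scores x3473: in cuBLAS column-major terms,
// scores(10 x 64) = x976^T (10 x 2048) * pooled features (2048 x 64), with the 1 x 10 bias
// x439 broadcast over the batch by cudnnAddTensor. The 10-way output and batch size 64 suggest
// a CIFAR-10-sized setup. This block applies log-softmax (CUDNN_SOFTMAX_LOG, per channel) to
// the scores, writing log-probabilities into x3510.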
cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnSoftmaxForward( cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL, x3508, x_desc, x3473, x3506, x_desc, x3510)); }; int32_t x3512 = 0; int32_t x3513 = 1; x3513 *= 64; x3513 *= 10; int32_t x3516 = x3512; bool x3517 = x3516 >= 2; if (x3517) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3522 = x3516 == 0; if (x3522) { int32_t x3523 = x3513; bool x3524 = x3523 == 640; if (x3524) { } else { assert(false && "must same size!!"); } } else { } float* x3531 = (float*)myGpuMalloc(640 * sizeof(float)); float* x3532 = (float*)myGpuMalloc(64 * sizeof(float)); nllLoss<<<64, 1>>>(x3510, 10, x3532, x1405); float* x3534 = (float*)myGpuMalloc(64 * sizeof(float)); int32_t x3535 = 0; int32_t x3536 = 1; x3536 *= 64; x3536 *= 1; x3536 *= 1; x3536 *= 1; int32_t x3541 = x3535; bool x3542 = x3541 >= 2; if (x3542) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3547 = x3541 == 0; if (x3547) { int32_t x3548 = x3536; bool x3549 = x3548 == 64; if (x3549) { } else { assert(false && "must same size!!"); } } else { } float* x3556 = (float*)myGpuMalloc(1 * sizeof(float)); float* x3557 = (float*)myMalloc(1 * sizeof(float));; x3557[0] = 0.0f; float* x3559 = (float*)myMalloc(1 * sizeof(float));; x3559[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, 1, 1)); cudnnReduceTensorDescriptor_t reduce_desc; CUDNN_CALL(cudnnCreateReduceTensorDescriptor(&reduce_desc)); CUDNN_CALL(cudnnSetReduceTensorDescriptor( reduce_desc, CUDNN_REDUCE_TENSOR_AVG, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES)); void *indices = nullptr; // Don't store indices. // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetReductionWorkspaceSize( cudnnHandle, reduce_desc, x_desc, out_desc, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnReduceTensor( cudnnHandle, reduce_desc, indices, 0, ws_data, ws_size, x3559, x_desc, x3532, x3557, out_desc, x3556)); }; int32_t x3562 = 0; int32_t x3563 = 1; x3563 *= 1; int32_t x3565 = x3562; bool x3566 = x3565 >= 2; if (x3566) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3571 = x3565 == 0; if (x3571) { int32_t x3572 = x3563; bool x3573 = x3572 == 1; if (x3573) { } else { assert(false && "must same size!!"); } } else { } float* x3580 = (float*)myGpuMalloc(1 * sizeof(float)); // make sure the size of loss is 1 arrayFill<<<28, 512>>>(x3580, 1.0f, 1); // backend is lantern.TensorDslCudnn$BackendCudnn@22cd45ab CUDA_CALL(cudaMemcpy(x1410, x3556, 1 * sizeof(float), cudaMemcpyDeviceToDevice)); // 'mean' gradient // backprop for mean op float x3587 = x3580[0]; float x3588 = x3587 / 64.0f; addScalar<<<28, 512>>>(x3534, x3534, x3588, 64); // 'nllLossB' gradient. 
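// Loss pipeline: nllLoss (one thread per sample) presumably picks -log p(target) out of the
// log-probabilities x3510 using the labels in x1405; cudnnReduceTensor with
// CUDNN_REDUCE_TENSOR_AVG then averages the 64 per-sample losses into the scalar x3556, which
// is copied into x1410, presumably for later reporting. arrayFill seeds d(loss)/d(loss) = 1.0
// in x3580, and the mean backprop above distributes it as 1/64 per sample (x3588) into the
// per-sample loss gradient x3534 via addScalar. Note that x3587 = x3580[0] dereferences a
// myGpuMalloc'd buffer on the host, which only works if that allocator hands out
// host-accessible (e.g. managed) memory.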
nllLoss_grad<<<64, 1>>>(10, x3534, x1405, x3531); int32_t x3592 = 0; int32_t x3593 = 1; x3593 *= 64; x3593 *= 10; x3593 *= 1; x3593 *= 1; int32_t x3598 = x3592; bool x3599 = x3598 >= 2; if (x3599) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3604 = x3598 == 0; if (x3604) { int32_t x3605 = x3593; bool x3606 = x3605 == 640; if (x3606) { } else { assert(false && "must same size!!"); } } else { } int32_t x3613 = 0; int32_t x3614 = 1; x3614 *= 64; x3614 *= 10; x3614 *= 1; x3614 *= 1; int32_t x3619 = x3613; bool x3620 = x3619 >= 2; if (x3620) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3625 = x3619 == 0; if (x3625) { int32_t x3626 = x3614; bool x3627 = x3626 == 640; if (x3627) { } else { assert(false && "must same size!!"); } } else { } int32_t x3634 = 0; int32_t x3635 = 1; x3635 *= 64; x3635 *= 10; x3635 *= 1; x3635 *= 1; int32_t x3640 = x3634; bool x3641 = x3640 >= 2; if (x3641) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3646 = x3640 == 0; if (x3646) { int32_t x3647 = x3635; bool x3648 = x3647 == 640; if (x3648) { } else { assert(false && "must same size!!"); } } else { } int32_t x3655 = 0; int32_t x3656 = 1; x3656 *= 64; x3656 *= 10; x3656 *= 1; x3656 *= 1; int32_t x3661 = x3655; bool x3662 = x3661 >= 2; if (x3662) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x3667 = x3661 == 0; if (x3667) { int32_t x3668 = x3656; bool x3669 = x3668 == 640; if (x3669) { } else { assert(false && "must same size!!"); } } else { } float* x3676 = (float*)myMalloc(1 * sizeof(float));; x3676[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnSoftmaxBackward( cudnnHandle, CUDNN_SOFTMAX_LOG, CUDNN_SOFTMAX_MODE_CHANNEL, x3676, x_desc, x3510, x_desc, x3531, x3676, x_desc, x3479)); }; float* x3679 = (float*)myMalloc(1 * sizeof(float));; x3679[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, 1, 1)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3679, grad_out_desc, x3479, x3679, grad_bias_desc, x1155)); }; // backprop for gemm List(Const(64), Sym(3469)), Vector(Const(10), Const(2048)) float* x3683 = (float*)myMalloc(1 * sizeof(float));; x3683[0] = 1.0f; float* x3685 = (float*)myMalloc(1 * sizeof(float));; x3685[0] = 1.0f; // backprop of gemm int32_t x3469 = x3442 / x3468; CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, x3469,64,10,x3683,x976,x3469,x3479,10,x3685,x3447,x3469)); CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, x3469,10,64,x3683,x3445,x3469,x3479,10,x3685,x1334,x3469)); float* x3690 = (float*)myMalloc(1 * sizeof(float));; x3690[0] = 0.0f; float* x3692 = (float*)myMalloc(1 * sizeof(float));; x3692[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; 
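// nllLoss_grad above presumably scatters each sample's gradient into its target-class slot of
// the 64 x 10 buffer x3531, and cudnnSoftmaxBackward (log mode) turns it into the score
// gradient x3479. The fully connected layer is then backpropagated by hand:
//   - cudnnConvolutionBackwardBias reduces x3479 over the batch into the bias gradient x1155
//     (reusing the conv-bias primitive for an FC bias);
//   - the first cublasSgemm accumulates the feature gradient dX(2048 x 64) = W(2048 x 10) * dY(10 x 64)
//     into x3447, the second accumulates the weight gradient dW(2048 x 10) = X(2048 x 64) * dY^T
//     into x1334.
// The descriptors being built here feed cudnnPoolingBackward, which spreads each pooled-output
// gradient in x3447 uniformly over its 2x2 average-pooling window, producing x3408.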
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3439, x3439)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 1, 1 )); CUDNN_CALL(cudnnPoolingBackward( cudnnHandle, poolingDesc, x3692, out_desc, x3445, out_desc, x3447, in_desc, x3400 , x3690, in_desc, x3408)); }; float* x3695 = (float*)myMalloc(1 * sizeof(float));; x3695[0] = 1.0f; float* x3697 = (float*)myMalloc(1 * sizeof(float));; x3697[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3695, x_desc, x3400, x_desc, x3408, x_desc, x3400, x3697, x_desc, x3408)); }; if (x3701) { if (x3704) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3387) x Sym(3387), res: x Const(64) x Const(2048) x Sym(3270) x Sym(3270)"); } float* x3709 = (float*)myMalloc(1 * sizeof(float));; x3709[0] = 1.0f; float* x3711 = (float*)myMalloc(1 * sizeof(float));; x3711[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3709, bias_desc, x3408, x3711, out_desc, x3291)); }; } else { float* x3715 = (float*)myMalloc(1 * sizeof(float));; x3715[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3715, grad_out_desc, x3408, x3715, grad_bias_desc, x3291)); }; } float* x3720 = (float*)myMalloc(1 * sizeof(float));; x3720[0] = 0.0f; float* x3722 = (float*)myMalloc(1 * sizeof(float));; x3722[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3722, 
x3722, x3722, x3722, in_desc, x3393, out_desc, x3408, in_desc, x3399, sbmv_desc, x604, x1210,x1288, 1.0E-5, x3401, x3402)); }; // conv2D back-propagate float* x3726 = (float*)myMalloc(1 * sizeof(float));; x3726[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3726, filt_desc, x394, grad_out_desc, x3399, conv_desc, algo, ws_data, ws_size, x3726, grad_in_desc, x3374)); }; float* x3729 = (float*)myMalloc(1 * sizeof(float));; x3729[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3387, x3387)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
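// Workspace and filter-gradient call for the 1x1 expansion conv: cudnnConvolutionBackwardFilter
// below accumulates into x1140 from the saved input x3366 and the output gradient x3399, which
// the 2048-channel batch-norm backward above produced (also yielding dScale x1210 / dBias x1288).
// Throughout this file the filter-gradient algorithm is pinned to ALGO_1 after the heuristic
// query, while the data-gradient path keeps the heuristic's choice (its override is commented out).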
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3729, in_desc, x3366, grad_out_desc, x3399, conv_desc, algo, ws_data, ws_size, x3729, grad_filt_desc, x1140)); }; float* x3732 = (float*)myMalloc(1 * sizeof(float));; x3732[0] = 1.0f; float* x3734 = (float*)myMalloc(1 * sizeof(float));; x3734[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3732, x_desc, x3366, x_desc, x3374, x_desc, x3366, x3734, x_desc, x3374)); }; float* x3737 = (float*)myMalloc(1 * sizeof(float));; x3737[0] = 0.0f; float* x3739 = (float*)myMalloc(1 * sizeof(float));; x3739[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3739, x3739, x3739, x3739, in_desc, x3359, out_desc, x3374, in_desc, x3365, sbmv_desc, x877, x1301,x1276, 1.0E-5, x3367, x3368)); }; // conv2D back-propagate float* x3743 = (float*)myMalloc(1 * sizeof(float));; x3743[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
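// Backward-data pass of this block's 3x3 512 -> 512 convolution (weights x379): the ReLU and
// 512-channel batch-norm backward steps above (scale x877, dScale x1301 / dBias x1276) turned
// x3374 into x3365, and the call below propagates it to x3338 at the 3x3 conv's input
// resolution; the matching filter-gradient call right after accumulates into x1135.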
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3743, filt_desc, x379, grad_out_desc, x3365, conv_desc, algo, ws_data, ws_size, x3743, grad_in_desc, x3338)); }; float* x3746 = (float*)myMalloc(1 * sizeof(float));; x3746[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3353, x3353)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3746, in_desc, x3330, grad_out_desc, x3365, conv_desc, algo, ws_data, ws_size, x3746, grad_filt_desc, x1135)); }; float* x3749 = (float*)myMalloc(1 * sizeof(float));; x3749[0] = 1.0f; float* x3751 = (float*)myMalloc(1 * sizeof(float));; x3751[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3749, x_desc, x3330, x_desc, x3338, x_desc, x3330, x3751, x_desc, x3338)); }; float* x3754 = (float*)myMalloc(1 * sizeof(float));; x3754[0] = 0.0f; float* x3756 = (float*)myMalloc(1 * sizeof(float));; x3756[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3756, x3756, x3756, x3756, in_desc, x3323, out_desc, 
x3338, in_desc, x3329, sbmv_desc, x340, x1122,x1185, 1.0E-5, x3331, x3332)); }; // conv2D back-propagate float* x3760 = (float*)myMalloc(1 * sizeof(float));; x3760[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3760, filt_desc, x1090, grad_out_desc, x3329, conv_desc, algo, ws_data, ws_size, x3760, grad_in_desc, x3291)); }; float* x3763 = (float*)myMalloc(1 * sizeof(float));; x3763[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3317, x3317)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
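// Backward pass of this block's 1x1 reduction conv (512 out, 2048 in, weights x1090): the
// data-gradient call above accumulates (alpha = beta = 1) into x3291, which already carries
// the shortcut gradient added earlier, so the residual and main branches sum as expected.
// The filter gradient is accumulated into x1372 by the call below.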
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3763, in_desc, x3283, grad_out_desc, x3329, conv_desc, algo, ws_data, ws_size, x3763, grad_filt_desc, x1372)); }; float* x3766 = (float*)myMalloc(1 * sizeof(float));; x3766[0] = 1.0f; float* x3768 = (float*)myMalloc(1 * sizeof(float));; x3768[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3766, x_desc, x3283, x_desc, x3291, x_desc, x3283, x3768, x_desc, x3291)); }; if (x3772) { if (x3774) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3270) x Sym(3270), res: x Const(64) x Const(2048) x Sym(3127) x Sym(3127)"); } float* x3779 = (float*)myMalloc(1 * sizeof(float));; x3779[0] = 1.0f; float* x3781 = (float*)myMalloc(1 * sizeof(float));; x3781[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3779, bias_desc, x3291, x3781, out_desc, x3148)); }; } else { float* x3785 = (float*)myMalloc(1 * sizeof(float));; x3785[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3785, grad_out_desc, x3291, x3785, grad_bias_desc, x3148)); }; } float* x3790 = (float*)myMalloc(1 * sizeof(float));; x3790[0] = 0.0f; float* x3792 = (float*)myMalloc(1 * sizeof(float));; x3792[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3792, x3792, x3792, x3792, in_desc, x3276, out_desc, x3291, in_desc, x3282, sbmv_desc, x577, x1201,x1251, 1.0E-5, x3284, x3285)); }; // conv2D back-propagate float* x3796 = (float*)myMalloc(1 
* sizeof(float));; x3796[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3796, filt_desc, x436, grad_out_desc, x3282, conv_desc, algo, ws_data, ws_size, x3796, grad_in_desc, x3257)); }; float* x3799 = (float*)myMalloc(1 * sizeof(float));; x3799[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3270, x3270)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
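// From here the unrolled code repeats the same backward recipe for the preceding bottleneck
// block (backprop walks the blocks in reverse): expansion 1x1, 3x3, reduction 1x1, each with
// ReLU and batch-norm backward in between. Only the buffer names change; for this block the
// expansion conv uses weights x436, with its filter gradient accumulated into x1154 below.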
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3799, in_desc, x3249, grad_out_desc, x3282, conv_desc, algo, ws_data, ws_size, x3799, grad_filt_desc, x1154)); }; float* x3802 = (float*)myMalloc(1 * sizeof(float));; x3802[0] = 1.0f; float* x3804 = (float*)myMalloc(1 * sizeof(float));; x3804[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3802, x_desc, x3249, x_desc, x3257, x_desc, x3249, x3804, x_desc, x3257)); }; float* x3807 = (float*)myMalloc(1 * sizeof(float));; x3807[0] = 0.0f; float* x3809 = (float*)myMalloc(1 * sizeof(float));; x3809[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3809, x3809, x3809, x3809, in_desc, x3242, out_desc, x3257, in_desc, x3248, sbmv_desc, x775, x1267,x1173, 1.0E-5, x3250, x3251)); }; // conv2D back-propagate float* x3813 = (float*)myMalloc(1 * sizeof(float));; x3813[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3813, filt_desc, x760, grad_out_desc, x3248, conv_desc, algo, ws_data, ws_size, x3813, grad_in_desc, x3221)); }; float* x3816 = (float*)myMalloc(1 * sizeof(float));; x3816[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3236, x3236)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3816, in_desc, x3213, grad_out_desc, x3248, conv_desc, algo, ws_data, ws_size, x3816, grad_filt_desc, x1262)); }; float* x3819 = (float*)myMalloc(1 * sizeof(float));; x3819[0] = 1.0f; float* x3821 = (float*)myMalloc(1 * sizeof(float));; x3821[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3819, x_desc, x3213, x_desc, x3221, x_desc, x3213, x3821, x_desc, x3221)); }; float* x3824 = (float*)myMalloc(1 * sizeof(float));; x3824[0] = 0.0f; float* x3826 = (float*)myMalloc(1 * sizeof(float));; x3826[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3826, x3826, x3826, x3826, in_desc, x3206, out_desc, 
x3221, in_desc, x3212, sbmv_desc, x433, x1153,x1244, 1.0E-5, x3214, x3215)); }; // conv2D back-propagate float* x3830 = (float*)myMalloc(1 * sizeof(float));; x3830[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3830, filt_desc, x940, grad_out_desc, x3212, conv_desc, algo, ws_data, ws_size, x3830, grad_in_desc, x3148)); }; float* x3833 = (float*)myMalloc(1 * sizeof(float));; x3833[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 2048, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3200, x3200)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3833, in_desc, x3140, grad_out_desc, x3212, conv_desc, algo, ws_data, ws_size, x3833, grad_filt_desc, x1322)); }; float* x3836 = (float*)myMalloc(1 * sizeof(float));; x3836[0] = 1.0f; float* x3838 = (float*)myMalloc(1 * sizeof(float));; x3838[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3836, x_desc, x3140, x_desc, x3148, x_desc, x3140, x3838, x_desc, x3148)); }; if (x3842) { if (x3844) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(2048) x Sym(3127) x Sym(3127), res: x Const(64) x Const(2048) x Sym(3153) x Sym(3153)"); } float* x3849 = (float*)myMalloc(1 * sizeof(float));; x3849[0] = 1.0f; float* x3851 = (float*)myMalloc(1 * sizeof(float));; x3851[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3849, bias_desc, x3148, x3851, out_desc, x3174)); }; } else { float* x3855 = (float*)myMalloc(1 * sizeof(float));; x3855[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3855, grad_out_desc, x3148, x3855, grad_bias_desc, x3174)); }; } float* x3860 = (float*)myMalloc(1 * sizeof(float));; x3860[0] = 0.0f; float* x3862 = (float*)myMalloc(1 * sizeof(float));; x3862[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3862, x3862, x3862, x3862, in_desc, x3159, out_desc, x3174, in_desc, x3165, sbmv_desc, x814, x1280,x1214, 1.0E-5, x3167, x3168)); }; // conv2D back-propagate float* x3866 = (float*)myMalloc(1 
* sizeof(float));; x3866[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3866, filt_desc, x937, grad_out_desc, x3165, conv_desc, algo, ws_data, ws_size, x3866, grad_in_desc, x3031)); }; float* x3869 = (float*)myMalloc(1 * sizeof(float));; x3869[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3153, x3153)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
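// Backward pass of the strided projection shortcut that enters this 2048-channel stage: a 1x1,
// stride-2 convolution from 1024 to 2048 channels (weights x937), preceded in forward order by
// its own batch norm (scale x814, handled just above). Its data gradient is accumulated into
// x3031, the gradient of the previous stage's 1024-channel output; the filter gradient goes to
// x1321 below.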
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3869, in_desc, x3023, grad_out_desc, x3165, conv_desc, algo, ws_data, ws_size, x3869, grad_filt_desc, x1321)); }; float* x3872 = (float*)myMalloc(1 * sizeof(float));; x3872[0] = 0.0f; float* x3874 = (float*)myMalloc(1 * sizeof(float));; x3874[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 2048, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3874, x3874, x3874, x3874, in_desc, x3133, out_desc, x3148, in_desc, x3139, sbmv_desc, x1012, x1346,x1169, 1.0E-5, x3141, x3142)); }; // conv2D back-propagate float* x3878 = (float*)myMalloc(1 * sizeof(float));; x3878[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
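// With the projection shortcut handled, backprop continues down the main path of this stage's
// first block: the data-gradient call below pushes the 2048-channel gradient x3139 through the
// 1x1 expansion conv (weights x931) to the 512-channel gradient x3114, and its weight gradient
// lands in x1319 right after.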
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3878, filt_desc, x931, grad_out_desc, x3139, conv_desc, algo, ws_data, ws_size, x3878, grad_in_desc, x3114)); }; float* x3881 = (float*)myMalloc(1 * sizeof(float));; x3881[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 2048, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 2048, x3127, x3127)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3881, in_desc, x3106, grad_out_desc, x3139, conv_desc, algo, ws_data, ws_size, x3881, grad_filt_desc, x1319)); }; float* x3884 = (float*)myMalloc(1 * sizeof(float));; x3884[0] = 1.0f; float* x3886 = (float*)myMalloc(1 * sizeof(float));; x3886[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3884, x_desc, x3106, x_desc, x3114, x_desc, x3106, x3886, x_desc, x3114)); }; float* x3889 = (float*)myMalloc(1 * sizeof(float));; x3889[0] = 0.0f; float* x3891 = (float*)myMalloc(1 * sizeof(float));; x3891[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3891, x3891, x3891, x3891, in_desc, x3099, 
out_desc, x3114, in_desc, x3105, sbmv_desc, x910, x1312,x1266, 1.0E-5, x3107, x3108)); }; // conv2D back-propagate float* x3895 = (float*)myMalloc(1 * sizeof(float));; x3895[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3895, filt_desc, x397, grad_out_desc, x3105, conv_desc, algo, ws_data, ws_size, x3895, grad_in_desc, x3078)); }; float* x3898 = (float*)myMalloc(1 * sizeof(float));; x3898[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 512, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3093, x3093)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
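// The first block of this stage also downsamples spatially: note the stride-2, pad-1 descriptor
// of its 3x3 convolution (weights x397). The filter-gradient call below accumulates into x1141
// from the saved input x3070 and the output gradient x3105.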
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3898, in_desc, x3070, grad_out_desc, x3105, conv_desc, algo, ws_data, ws_size, x3898, grad_filt_desc, x1141)); }; float* x3901 = (float*)myMalloc(1 * sizeof(float));; x3901[0] = 1.0f; float* x3903 = (float*)myMalloc(1 * sizeof(float));; x3903[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3901, x_desc, x3070, x_desc, x3078, x_desc, x3070, x3903, x_desc, x3078)); }; float* x3906 = (float*)myMalloc(1 * sizeof(float));; x3906[0] = 0.0f; float* x3908 = (float*)myMalloc(1 * sizeof(float));; x3908[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3908, x3908, x3908, x3908, in_desc, x3063, out_desc, x3078, in_desc, x3069, sbmv_desc, x898, x1308,x1331, 1.0E-5, x3071, x3072)); }; // conv2D back-propagate float* x3912 = (float*)myMalloc(1 * sizeof(float));; x3912[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
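// From here the code repeats one backward recipe per layer: cudnnConvolutionBackwardData
// pushes the incoming gradient to the layer input, cudnnConvolutionBackwardFilter accumulates
// the weight gradient, cudnnActivationBackward handles the ReLU, and
// cudnnBatchNormalizationBackward the batch norm in front of it. The 1x1 / 3x3 / 1x1 channel
// pattern (256<->1024, 512<->2048) with a guarded cudnnAddTensor across a shortcut looks like
// the bottleneck blocks of a ResNet-style network (an inference from the shapes, not something
// stated in this file).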
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3912, filt_desc, x712, grad_out_desc, x3069, conv_desc, algo, ws_data, ws_size, x3912, grad_in_desc, x3031)); }; float* x3915 = (float*)myMalloc(1 * sizeof(float));; x3915[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x3057, x3057)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3915, in_desc, x3023, grad_out_desc, x3069, conv_desc, algo, ws_data, ws_size, x3915, grad_filt_desc, x1246)); }; float* x3918 = (float*)myMalloc(1 * sizeof(float));; x3918[0] = 1.0f; float* x3920 = (float*)myMalloc(1 * sizeof(float));; x3920[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3918, x_desc, x3023, x_desc, x3031, x_desc, x3023, x3920, x_desc, x3031)); }; if (x3924) { if (x3927) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(3010) x Sym(3010), res: x Const(64) x Const(1024) x Sym(2893) x Sym(2893)"); } float* x3932 = (float*)myMalloc(1 * sizeof(float));; x3932[0] = 1.0f; float* x3934 = (float*)myMalloc(1 * sizeof(float));; x3934[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x3932, bias_desc, x3031, x3934, out_desc, x2914)); }; } else 
{ float* x3938 = (float*)myMalloc(1 * sizeof(float));; x3938[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x3938, grad_out_desc, x3031, x3938, grad_bias_desc, x2914)); }; } float* x3943 = (float*)myMalloc(1 * sizeof(float));; x3943[0] = 0.0f; float* x3945 = (float*)myMalloc(1 * sizeof(float));; x3945[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3945, x3945, x3945, x3945, in_desc, x3016, out_desc, x3031, in_desc, x3022, sbmv_desc, x1039, x1355,x1200, 1.0E-5, x3024, x3025)); }; // conv2D back-propagate float* x3949 = (float*)myMalloc(1 * sizeof(float));; x3949[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
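// Scaling factors throughout are single floats freshly allocated on the host with myMalloc:
// the convolution and batch-norm backward calls pass 1.0f for both alpha and beta, so their
// results are accumulated into the existing gradient buffers, while cudnnActivationBackward
// uses alpha = 1.0f, beta = 0.0f and rewrites its gradient buffer in place.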
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3949, filt_desc, x586, grad_out_desc, x3022, conv_desc, algo, ws_data, ws_size, x3949, grad_in_desc, x2997)); }; float* x3952 = (float*)myMalloc(1 * sizeof(float));; x3952[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x3010, x3010)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3952, in_desc, x2989, grad_out_desc, x3022, conv_desc, algo, ws_data, ws_size, x3952, grad_filt_desc, x1204)); }; float* x3955 = (float*)myMalloc(1 * sizeof(float));; x3955[0] = 1.0f; float* x3957 = (float*)myMalloc(1 * sizeof(float));; x3957[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3955, x_desc, x2989, x_desc, x2997, x_desc, x2989, x3957, x_desc, x2997)); }; float* x3960 = (float*)myMalloc(1 * sizeof(float));; x3960[0] = 0.0f; float* x3962 = (float*)myMalloc(1 * sizeof(float));; x3962[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3962, x3962, x3962, x3962, in_desc, x2982, 
out_desc, x2997, in_desc, x2988, sbmv_desc, x718, x1248,x1296, 1.0E-5, x2990, x2991)); }; // conv2D back-propagate float* x3966 = (float*)myMalloc(1 * sizeof(float));; x3966[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3966, filt_desc, x973, grad_out_desc, x2988, conv_desc, algo, ws_data, ws_size, x3966, grad_in_desc, x2961)); }; float* x3969 = (float*)myMalloc(1 * sizeof(float));; x3969[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2976, x2976)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3969, in_desc, x2953, grad_out_desc, x2988, conv_desc, algo, ws_data, ws_size, x3969, grad_filt_desc, x1333)); }; float* x3972 = (float*)myMalloc(1 * sizeof(float));; x3972[0] = 1.0f; float* x3974 = (float*)myMalloc(1 * sizeof(float));; x3974[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3972, x_desc, x2953, x_desc, x2961, x_desc, x2953, x3974, x_desc, x2961)); }; float* x3977 = (float*)myMalloc(1 * sizeof(float));; x3977[0] = 0.0f; float* x3979 = (float*)myMalloc(1 * sizeof(float));; x3979[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x3979, x3979, x3979, x3979, in_desc, x2946, out_desc, x2961, in_desc, x2952, sbmv_desc, x550, x1192,x1360, 1.0E-5, x2954, x2955)); }; // conv2D back-propagate float* x3983 = (float*)myMalloc(1 * sizeof(float));; x3983[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
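// This 1x1 convolution maps the 1024-channel block input down to 256 channels; the
// backward-data call below accumulates its input gradient into x2914, the same 1024-channel
// buffer that the residual branch a few calls later folds into the shortcut gradient x2797.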
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x3983, filt_desc, x748, grad_out_desc, x2952, conv_desc, algo, ws_data, ws_size, x3983, grad_in_desc, x2914)); }; float* x3986 = (float*)myMalloc(1 * sizeof(float));; x3986[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2940, x2940)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x3986, in_desc, x2906, grad_out_desc, x2952, conv_desc, algo, ws_data, ws_size, x3986, grad_filt_desc, x1258)); }; float* x3989 = (float*)myMalloc(1 * sizeof(float));; x3989[0] = 1.0f; float* x3991 = (float*)myMalloc(1 * sizeof(float));; x3991[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x3989, x_desc, x2906, x_desc, x2914, x_desc, x2906, x3991, x_desc, x2914)); }; if (x3995) { if (x3997) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2893) x Sym(2893), res: x Const(64) x Const(1024) x Sym(2776) x Sym(2776)"); } float* x4002 = (float*)myMalloc(1 * sizeof(float));; x4002[0] = 1.0f; float* x4004 = (float*)myMalloc(1 * sizeof(float));; x4004[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4002, bias_desc, x2914, x4004, out_desc, x2797)); }; } else 
{ float* x4008 = (float*)myMalloc(1 * sizeof(float));; x4008[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4008, grad_out_desc, x2914, x4008, grad_bias_desc, x2797)); }; } float* x4013 = (float*)myMalloc(1 * sizeof(float));; x4013[0] = 0.0f; float* x4015 = (float*)myMalloc(1 * sizeof(float));; x4015[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4015, x4015, x4015, x4015, in_desc, x2899, out_desc, x2914, in_desc, x2905, sbmv_desc, x472, x1166,x1227, 1.0E-5, x2907, x2908)); }; // conv2D back-propagate float* x4019 = (float*)myMalloc(1 * sizeof(float));; x4019[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
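// The cudnnBatchNormalizationBackward calls use CUDNN_BATCHNORM_SPATIAL with all four scaling
// factors set to 1.0f, so the data gradient and the scale/bias gradients (x1275 / x1216 in the
// call below) all accumulate; the two pointers following the 1.0E-5 epsilon are the
// savedMean / savedInvVariance slots, presumably filled by the matching forward call.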
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4019, filt_desc, x958, grad_out_desc, x2905, conv_desc, algo, ws_data, ws_size, x4019, grad_in_desc, x2880)); }; float* x4022 = (float*)myMalloc(1 * sizeof(float));; x4022[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2893, x2893)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4022, in_desc, x2872, grad_out_desc, x2905, conv_desc, algo, ws_data, ws_size, x4022, grad_filt_desc, x1328)); }; float* x4025 = (float*)myMalloc(1 * sizeof(float));; x4025[0] = 1.0f; float* x4027 = (float*)myMalloc(1 * sizeof(float));; x4027[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4025, x_desc, x2872, x_desc, x2880, x_desc, x2872, x4027, x_desc, x2880)); }; float* x4030 = (float*)myMalloc(1 * sizeof(float));; x4030[0] = 0.0f; float* x4032 = (float*)myMalloc(1 * sizeof(float));; x4032[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4032, x4032, x4032, x4032, in_desc, x2865, 
out_desc, x2880, in_desc, x2871, sbmv_desc, x799, x1275,x1216, 1.0E-5, x2873, x2874)); }; // conv2D back-propagate float* x4036 = (float*)myMalloc(1 * sizeof(float));; x4036[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4036, filt_desc, x1081, grad_out_desc, x2871, conv_desc, algo, ws_data, ws_size, x4036, grad_in_desc, x2844)); }; float* x4039 = (float*)myMalloc(1 * sizeof(float));; x4039[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2859, x2859)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4039, in_desc, x2836, grad_out_desc, x2871, conv_desc, algo, ws_data, ws_size, x4039, grad_filt_desc, x1369)); }; float* x4042 = (float*)myMalloc(1 * sizeof(float));; x4042[0] = 1.0f; float* x4044 = (float*)myMalloc(1 * sizeof(float));; x4044[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4042, x_desc, x2836, x_desc, x2844, x_desc, x2836, x4044, x_desc, x2844)); }; float* x4047 = (float*)myMalloc(1 * sizeof(float));; x4047[0] = 0.0f; float* x4049 = (float*)myMalloc(1 * sizeof(float));; x4049[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4049, x4049, x4049, x4049, in_desc, x2829, out_desc, x2844, in_desc, x2835, sbmv_desc, x526, x1184,x1292, 1.0E-5, x2837, x2838)); }; // conv2D back-propagate float* x4053 = (float*)myMalloc(1 * sizeof(float));; x4053[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4053, filt_desc, x361, grad_out_desc, x2835, conv_desc, algo, ws_data, ws_size, x4053, grad_in_desc, x2797)); }; float* x4056 = (float*)myMalloc(1 * sizeof(float));; x4056[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2823, x2823)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4056, in_desc, x2789, grad_out_desc, x2835, conv_desc, algo, ws_data, ws_size, x4056, grad_filt_desc, x1129)); }; float* x4059 = (float*)myMalloc(1 * sizeof(float));; x4059[0] = 1.0f; float* x4061 = (float*)myMalloc(1 * sizeof(float));; x4061[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4059, x_desc, x2789, x_desc, x2797, x_desc, x2789, x4061, x_desc, x2797)); }; if (x4065) { if (x4067) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2776) x Sym(2776), res: x Const(64) x Const(1024) x Sym(2659) x Sym(2659)"); } float* x4072 = (float*)myMalloc(1 * sizeof(float));; x4072[0] = 1.0f; float* x4074 = (float*)myMalloc(1 * sizeof(float));; x4074[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4072, bias_desc, x2797, x4074, out_desc, x2680)); }; } else 
{ float* x4078 = (float*)myMalloc(1 * sizeof(float));; x4078[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4078, grad_out_desc, x2797, x4078, grad_bias_desc, x2680)); }; } float* x4083 = (float*)myMalloc(1 * sizeof(float));; x4083[0] = 0.0f; float* x4085 = (float*)myMalloc(1 * sizeof(float));; x4085[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4085, x4085, x4085, x4085, in_desc, x2782, out_desc, x2797, in_desc, x2788, sbmv_desc, x1009, x1345,x1253, 1.0E-5, x2790, x2791)); }; // conv2D back-propagate float* x4089 = (float*)myMalloc(1 * sizeof(float));; x4089[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
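// Every call in this stretch creates its tensor/filter/convolution descriptors from scratch
// and grabs a fresh workspace via myGpuMalloc; nothing is destroyed or freed here, so the
// helpers are presumably backed by a pooled arena that is reclaimed elsewhere (an assumption:
// the allocator implementation is not shown here).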
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4089, filt_desc, x562, grad_out_desc, x2788, conv_desc, algo, ws_data, ws_size, x4089, grad_in_desc, x2763)); }; float* x4092 = (float*)myMalloc(1 * sizeof(float));; x4092[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2776, x2776)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4092, in_desc, x2755, grad_out_desc, x2788, conv_desc, algo, ws_data, ws_size, x4092, grad_filt_desc, x1196)); }; float* x4095 = (float*)myMalloc(1 * sizeof(float));; x4095[0] = 1.0f; float* x4097 = (float*)myMalloc(1 * sizeof(float));; x4097[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4095, x_desc, x2755, x_desc, x2763, x_desc, x2755, x4097, x_desc, x2763)); }; float* x4100 = (float*)myMalloc(1 * sizeof(float));; x4100[0] = 0.0f; float* x4102 = (float*)myMalloc(1 * sizeof(float));; x4102[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4102, x4102, x4102, x4102, in_desc, x2748, 
out_desc, x2763, in_desc, x2754, sbmv_desc, x517, x1181,x1243, 1.0E-5, x2756, x2757)); }; // conv2D back-propagate float* x4106 = (float*)myMalloc(1 * sizeof(float));; x4106[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4106, filt_desc, x1042, grad_out_desc, x2754, conv_desc, algo, ws_data, ws_size, x4106, grad_in_desc, x2727)); }; float* x4109 = (float*)myMalloc(1 * sizeof(float));; x4109[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2742, x2742)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4109, in_desc, x2719, grad_out_desc, x2754, conv_desc, algo, ws_data, ws_size, x4109, grad_filt_desc, x1356)); }; float* x4112 = (float*)myMalloc(1 * sizeof(float));; x4112[0] = 1.0f; float* x4114 = (float*)myMalloc(1 * sizeof(float));; x4114[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4112, x_desc, x2719, x_desc, x2727, x_desc, x2719, x4114, x_desc, x2727)); }; float* x4117 = (float*)myMalloc(1 * sizeof(float));; x4117[0] = 0.0f; float* x4119 = (float*)myMalloc(1 * sizeof(float));; x4119[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4119, x4119, x4119, x4119, in_desc, x2712, out_desc, x2727, in_desc, x2718, sbmv_desc, x571, x1199,x1348, 1.0E-5, x2720, x2721)); }; // conv2D back-propagate float* x4123 = (float*)myMalloc(1 * sizeof(float));; x4123[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
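// Note that the spatial extents in every descriptor are runtime ints (x2659, x2706, ...),
// matching the Sym(...) placeholders in the assert messages, while the batch size (64),
// channel counts, and filter geometry are baked in as constants.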
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4123, filt_desc, x313, grad_out_desc, x2718, conv_desc, algo, ws_data, ws_size, x4123, grad_in_desc, x2680)); }; float* x4126 = (float*)myMalloc(1 * sizeof(float));; x4126[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2706, x2706)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4126, in_desc, x2672, grad_out_desc, x2718, conv_desc, algo, ws_data, ws_size, x4126, grad_filt_desc, x1113)); }; float* x4129 = (float*)myMalloc(1 * sizeof(float));; x4129[0] = 1.0f; float* x4131 = (float*)myMalloc(1 * sizeof(float));; x4131[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4129, x_desc, x2672, x_desc, x2680, x_desc, x2672, x4131, x_desc, x2680)); }; if (x4135) { if (x4137) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2659) x Sym(2659), res: x Const(64) x Const(1024) x Sym(2542) x Sym(2542)"); } float* x4142 = (float*)myMalloc(1 * sizeof(float));; x4142[0] = 1.0f; float* x4144 = (float*)myMalloc(1 * sizeof(float));; x4144[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4142, bias_desc, x2680, x4144, out_desc, x2563)); }; } else 
{ float* x4148 = (float*)myMalloc(1 * sizeof(float));; x4148[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4148, grad_out_desc, x2680, x4148, grad_bias_desc, x2563)); }; } float* x4153 = (float*)myMalloc(1 * sizeof(float));; x4153[0] = 0.0f; float* x4155 = (float*)myMalloc(1 * sizeof(float));; x4155[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4155, x4155, x4155, x4155, in_desc, x2665, out_desc, x2680, in_desc, x2671, sbmv_desc, x1084, x1370,x1164, 1.0E-5, x2673, x2674)); }; // conv2D back-propagate float* x4159 = (float*)myMalloc(1 * sizeof(float));; x4159[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
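// The if/else blocks above implement the backward of the residual add: when both tensors share
// the same spatial extent, cudnnAddTensor adds the gradient straight onto the shortcut branch
// (here x2680 into x2563); the else arm falls back to cudnnConvolutionBackwardBias for the
// broadcast case described by the preceding assert message.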
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4159, filt_desc, x643, grad_out_desc, x2671, conv_desc, algo, ws_data, ws_size, x4159, grad_in_desc, x2646)); }; float* x4162 = (float*)myMalloc(1 * sizeof(float));; x4162[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2659, x2659)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4162, in_desc, x2638, grad_out_desc, x2671, conv_desc, algo, ws_data, ws_size, x4162, grad_filt_desc, x1223)); }; float* x4165 = (float*)myMalloc(1 * sizeof(float));; x4165[0] = 1.0f; float* x4167 = (float*)myMalloc(1 * sizeof(float));; x4167[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4165, x_desc, x2638, x_desc, x2646, x_desc, x2638, x4167, x_desc, x2646)); }; float* x4170 = (float*)myMalloc(1 * sizeof(float));; x4170[0] = 0.0f; float* x4172 = (float*)myMalloc(1 * sizeof(float));; x4172[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4172, x4172, x4172, x4172, in_desc, x2631, 
out_desc, x2646, in_desc, x2637, sbmv_desc, x979, x1335,x1299, 1.0E-5, x2639, x2640)); }; // conv2D back-propagate float* x4176 = (float*)myMalloc(1 * sizeof(float));; x4176[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4176, filt_desc, x337, grad_out_desc, x2637, conv_desc, algo, ws_data, ws_size, x4176, grad_in_desc, x2610)); }; float* x4179 = (float*)myMalloc(1 * sizeof(float));; x4179[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2625, x2625)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
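// Workspace query and backward-filter pass for the 3x3, 256-channel conv (padding 1);
// the weight gradient is accumulated (beta = 1) into its filter-gradient buffer.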
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4179, in_desc, x2602, grad_out_desc, x2637, conv_desc, algo, ws_data, ws_size, x4179, grad_filt_desc, x1121)); }; float* x4182 = (float*)myMalloc(1 * sizeof(float));; x4182[0] = 1.0f; float* x4184 = (float*)myMalloc(1 * sizeof(float));; x4184[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4182, x_desc, x2602, x_desc, x2610, x_desc, x2602, x4184, x_desc, x2610)); }; float* x4187 = (float*)myMalloc(1 * sizeof(float));; x4187[0] = 0.0f; float* x4189 = (float*)myMalloc(1 * sizeof(float));; x4189[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4189, x4189, x4189, x4189, in_desc, x2595, out_desc, x2610, in_desc, x2601, sbmv_desc, x682, x1236,x1304, 1.0E-5, x2603, x2604)); }; // conv2D back-propagate float* x4193 = (float*)myMalloc(1 * sizeof(float));; x4193[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
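// Workspace query and backward-data pass for the 1x1 conv (1024 -> 256 channels);
// the gradient w.r.t. the 1024-channel block input accumulates into the buffer that also
// collects the skip-path gradient (apparently the residual connection of this block).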
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4193, filt_desc, x949, grad_out_desc, x2601, conv_desc, algo, ws_data, ws_size, x4193, grad_in_desc, x2563)); }; float* x4196 = (float*)myMalloc(1 * sizeof(float));; x4196[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2589, x2589)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4196, in_desc, x2555, grad_out_desc, x2601, conv_desc, algo, ws_data, ws_size, x4196, grad_filt_desc, x1325)); }; float* x4199 = (float*)myMalloc(1 * sizeof(float));; x4199[0] = 1.0f; float* x4201 = (float*)myMalloc(1 * sizeof(float));; x4201[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4199, x_desc, x2555, x_desc, x2563, x_desc, x2555, x4201, x_desc, x2563)); }; if (x4205) { if (x4207) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2542) x Sym(2542), res: x Const(64) x Const(1024) x Sym(2399) x Sym(2399)"); } float* x4212 = (float*)myMalloc(1 * sizeof(float));; x4212[0] = 1.0f; float* x4214 = (float*)myMalloc(1 * sizeof(float));; x4214[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4212, bias_desc, x2563, x4214, out_desc, x2420)); }; } else 
{ float* x4218 = (float*)myMalloc(1 * sizeof(float));; x4218[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4218, grad_out_desc, x2563, x4218, grad_bias_desc, x2420)); }; } float* x4223 = (float*)myMalloc(1 * sizeof(float));; x4223[0] = 0.0f; float* x4225 = (float*)myMalloc(1 * sizeof(float));; x4225[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4225, x4225, x4225, x4225, in_desc, x2548, out_desc, x2563, in_desc, x2554, sbmv_desc, x355, x1127,x1339, 1.0E-5, x2556, x2557)); }; // conv2D back-propagate float* x4229 = (float*)myMalloc(1 * sizeof(float));; x4229[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
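// Workspace query and backward-data pass for the 1x1 conv (256 -> 1024 channels);
// the data gradient accumulates into the 256-channel gradient buffer.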
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4229, filt_desc, x463, grad_out_desc, x2554, conv_desc, algo, ws_data, ws_size, x4229, grad_in_desc, x2529)); }; float* x4232 = (float*)myMalloc(1 * sizeof(float));; x4232[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2542, x2542)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4232, in_desc, x2521, grad_out_desc, x2554, conv_desc, algo, ws_data, ws_size, x4232, grad_filt_desc, x1163)); }; float* x4235 = (float*)myMalloc(1 * sizeof(float));; x4235[0] = 1.0f; float* x4237 = (float*)myMalloc(1 * sizeof(float));; x4237[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4235, x_desc, x2521, x_desc, x2529, x_desc, x2521, x4237, x_desc, x2529)); }; float* x4240 = (float*)myMalloc(1 * sizeof(float));; x4240[0] = 0.0f; float* x4242 = (float*)myMalloc(1 * sizeof(float));; x4242[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4242, x4242, x4242, x4242, in_desc, x2514, 
out_desc, x2529, in_desc, x2520, sbmv_desc, x1108, x1378,x1203, 1.0E-5, x2522, x2523)); }; // conv2D back-propagate float* x4246 = (float*)myMalloc(1 * sizeof(float));; x4246[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4246, filt_desc, x388, grad_out_desc, x2520, conv_desc, algo, ws_data, ws_size, x4246, grad_in_desc, x2493)); }; float* x4249 = (float*)myMalloc(1 * sizeof(float));; x4249[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2508, x2508)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
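// Workspace query and backward-filter pass for the 3x3, 256-channel conv (padding 1) of this block.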
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4249, in_desc, x2485, grad_out_desc, x2520, conv_desc, algo, ws_data, ws_size, x4249, grad_filt_desc, x1138)); }; float* x4252 = (float*)myMalloc(1 * sizeof(float));; x4252[0] = 1.0f; float* x4254 = (float*)myMalloc(1 * sizeof(float));; x4254[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4252, x_desc, x2485, x_desc, x2493, x_desc, x2485, x4254, x_desc, x2493)); }; float* x4257 = (float*)myMalloc(1 * sizeof(float));; x4257[0] = 0.0f; float* x4259 = (float*)myMalloc(1 * sizeof(float));; x4259[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4259, x4259, x4259, x4259, in_desc, x2478, out_desc, x2493, in_desc, x2484, sbmv_desc, x385, x1137,x1326, 1.0E-5, x2486, x2487)); }; // conv2D back-propagate float* x4263 = (float*)myMalloc(1 * sizeof(float));; x4263[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
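// Workspace query and backward-data pass for the 1x1 conv (1024 -> 256 channels);
// the resulting input gradient accumulates into the shared 1024-channel buffer used by the residual path.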
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4263, filt_desc, x334, grad_out_desc, x2484, conv_desc, algo, ws_data, ws_size, x4263, grad_in_desc, x2420)); }; float* x4266 = (float*)myMalloc(1 * sizeof(float));; x4266[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 1024, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2472, x2472)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4266, in_desc, x2412, grad_out_desc, x2484, conv_desc, algo, ws_data, ws_size, x4266, grad_filt_desc, x1120)); }; float* x4269 = (float*)myMalloc(1 * sizeof(float));; x4269[0] = 1.0f; float* x4271 = (float*)myMalloc(1 * sizeof(float));; x4271[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4269, x_desc, x2412, x_desc, x2420, x_desc, x2412, x4271, x_desc, x2420)); }; if (x4275) { if (x4277) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(1024) x Sym(2399) x Sym(2399), res: x Const(64) x Const(1024) x Sym(2425) x Sym(2425)"); } float* x4282 = (float*)myMalloc(1 * sizeof(float));; x4282[0] = 1.0f; float* x4284 = (float*)myMalloc(1 * sizeof(float));; x4284[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4282, bias_desc, x2420, x4284, out_desc, x2446)); }; } else 
{ float* x4288 = (float*)myMalloc(1 * sizeof(float));; x4288[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4288, grad_out_desc, x2420, x4288, grad_bias_desc, x2446)); }; } float* x4293 = (float*)myMalloc(1 * sizeof(float));; x4293[0] = 0.0f; float* x4295 = (float*)myMalloc(1 * sizeof(float));; x4295[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4295, x4295, x4295, x4295, in_desc, x2431, out_desc, x2446, in_desc, x2437, sbmv_desc, x382, x1136,x1327, 1.0E-5, x2439, x2440)); }; // conv2D back-propagate float* x4299 = (float*)myMalloc(1 * sizeof(float));; x4299[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
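// Workspace query and backward-data pass for the strided 1x1 conv (512 -> 1024 channels, stride 2),
// i.e. what looks like the projection shortcut of this stage; its data gradient accumulates into
// the 512-channel gradient buffer of the stage input.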
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4299, filt_desc, x520, grad_out_desc, x2437, conv_desc, algo, ws_data, ws_size, x4299, grad_in_desc, x2303)); }; float* x4302 = (float*)myMalloc(1 * sizeof(float));; x4302[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2425, x2425)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4302, in_desc, x2295, grad_out_desc, x2437, conv_desc, algo, ws_data, ws_size, x4302, grad_filt_desc, x1182)); }; float* x4305 = (float*)myMalloc(1 * sizeof(float));; x4305[0] = 0.0f; float* x4307 = (float*)myMalloc(1 * sizeof(float));; x4307[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4307, x4307, x4307, x4307, in_desc, x2405, out_desc, x2420, in_desc, x2411, sbmv_desc, x349, x1125,x1224, 1.0E-5, x2413, x2414)); }; // conv2D back-propagate float* x4311 = (float*)myMalloc(1 * sizeof(float));; x4311[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); 
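// grad_out (set up just below) is the 64 x 1024 output gradient of this block's 1x1 conv (256 -> 1024 channels).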
CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4311, filt_desc, x1102, grad_out_desc, x2411, conv_desc, algo, ws_data, ws_size, x4311, grad_in_desc, x2386)); }; float* x4314 = (float*)myMalloc(1 * sizeof(float));; x4314[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 1024, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 1024, x2399, x2399)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
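// Workspace query and backward-filter pass for the 1x1 conv (256 -> 1024 channels); the weight gradient accumulates (beta = 1).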
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4314, in_desc, x2378, grad_out_desc, x2411, conv_desc, algo, ws_data, ws_size, x4314, grad_filt_desc, x1376)); }; float* x4317 = (float*)myMalloc(1 * sizeof(float));; x4317[0] = 1.0f; float* x4319 = (float*)myMalloc(1 * sizeof(float));; x4319[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4317, x_desc, x2378, x_desc, x2386, x_desc, x2378, x4319, x_desc, x2386)); }; float* x4322 = (float*)myMalloc(1 * sizeof(float));; x4322[0] = 0.0f; float* x4324 = (float*)myMalloc(1 * sizeof(float));; x4324[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4324, x4324, x4324, x4324, in_desc, x2371, out_desc, x2386, in_desc, x2377, sbmv_desc, x619, x1215,x1123, 1.0E-5, x2379, x2380)); }; // conv2D back-propagate float* x4328 = (float*)myMalloc(1 * sizeof(float));; x4328[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
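// Workspace query and backward-data pass for the 3x3, 256-channel conv with stride 2
// (the spatial downsampling step of this block).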
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4328, filt_desc, x820, grad_out_desc, x2377, conv_desc, algo, ws_data, ws_size, x4328, grad_in_desc, x2350)); }; float* x4331 = (float*)myMalloc(1 * sizeof(float));; x4331[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 256, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2365, x2365)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4331, in_desc, x2342, grad_out_desc, x2377, conv_desc, algo, ws_data, ws_size, x4331, grad_filt_desc, x1282)); }; float* x4334 = (float*)myMalloc(1 * sizeof(float));; x4334[0] = 1.0f; float* x4336 = (float*)myMalloc(1 * sizeof(float));; x4336[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4334, x_desc, x2342, x_desc, x2350, x_desc, x2342, x4336, x_desc, x2350)); }; float* x4339 = (float*)myMalloc(1 * sizeof(float));; x4339[0] = 0.0f; float* x4341 = (float*)myMalloc(1 * sizeof(float));; x4341[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4341, x4341, x4341, x4341, in_desc, x2335, out_desc, 
x2350, in_desc, x2341, sbmv_desc, x1105, x1377,x1128, 1.0E-5, x2343, x2344)); }; // conv2D back-propagate float* x4345 = (float*)myMalloc(1 * sizeof(float));; x4345[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4345, filt_desc, x835, grad_out_desc, x2341, conv_desc, algo, ws_data, ws_size, x4345, grad_in_desc, x2303)); }; float* x4348 = (float*)myMalloc(1 * sizeof(float));; x4348[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x2329, x2329)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
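// Workspace query and backward-filter pass for the 1x1 conv (512 -> 256 channels). Its backward-data
// result above was accumulated into the same 512-channel buffer (x2303) as the strided projection,
// summing the two gradient paths of the residual block.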
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4348, in_desc, x2295, grad_out_desc, x2341, conv_desc, algo, ws_data, ws_size, x4348, grad_filt_desc, x1287)); }; float* x4351 = (float*)myMalloc(1 * sizeof(float));; x4351[0] = 1.0f; float* x4353 = (float*)myMalloc(1 * sizeof(float));; x4353[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4351, x_desc, x2295, x_desc, x2303, x_desc, x2295, x4353, x_desc, x2303)); }; if (x4357) { if (x4360) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2282) x Sym(2282), res: x Const(64) x Const(512) x Sym(2165) x Sym(2165)"); } float* x4365 = (float*)myMalloc(1 * sizeof(float));; x4365[0] = 1.0f; float* x4367 = (float*)myMalloc(1 * sizeof(float));; x4367[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4365, bias_desc, x2303, x4367, out_desc, x2186)); }; } else { float* x4371 = (float*)myMalloc(1 * sizeof(float));; x4371[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4371, grad_out_desc, x2303, x4371, grad_bias_desc, x2186)); }; } float* x4376 = (float*)myMalloc(1 * sizeof(float));; x4376[0] = 0.0f; float* x4378 = (float*)myMalloc(1 * sizeof(float));; x4378[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4378, x4378, x4378, x4378, in_desc, x2288, out_desc, x2303, in_desc, x2294, sbmv_desc, x763, x1263,x1161, 1.0E-5, x2296, x2297)); }; // conv2D back-propagate float* x4382 = (float*)myMalloc(1 * 
sizeof(float));; x4382[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4382, filt_desc, x460, grad_out_desc, x2294, conv_desc, algo, ws_data, ws_size, x4382, grad_in_desc, x2269)); }; float* x4385 = (float*)myMalloc(1 * sizeof(float));; x4385[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2282, x2282)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
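// The backward sweep has reached the 128/512-channel blocks: workspace query and
// backward-filter pass for the 1x1 conv (128 -> 512 channels).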
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4385, in_desc, x2261, grad_out_desc, x2294, conv_desc, algo, ws_data, ws_size, x4385, grad_filt_desc, x1162)); }; float* x4388 = (float*)myMalloc(1 * sizeof(float));; x4388[0] = 1.0f; float* x4390 = (float*)myMalloc(1 * sizeof(float));; x4390[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4388, x_desc, x2261, x_desc, x2269, x_desc, x2261, x4390, x_desc, x2269)); }; float* x4393 = (float*)myMalloc(1 * sizeof(float));; x4393[0] = 0.0f; float* x4395 = (float*)myMalloc(1 * sizeof(float));; x4395[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4395, x4395, x4395, x4395, in_desc, x2254, out_desc, x2269, in_desc, x2260, sbmv_desc, x532, x1186,x1145, 1.0E-5, x2262, x2263)); }; // conv2D back-propagate float* x4399 = (float*)myMalloc(1 * sizeof(float));; x4399[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
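// Workspace query and backward-data pass for the 3x3, 128-channel conv (padding 1, stride 1);
// the data gradient accumulates into the 128-channel gradient buffer.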
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4399, filt_desc, x790, grad_out_desc, x2260, conv_desc, algo, ws_data, ws_size, x4399, grad_in_desc, x2233)); }; float* x4402 = (float*)myMalloc(1 * sizeof(float));; x4402[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2248, x2248)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4402, in_desc, x2225, grad_out_desc, x2260, conv_desc, algo, ws_data, ws_size, x4402, grad_filt_desc, x1272)); }; float* x4405 = (float*)myMalloc(1 * sizeof(float));; x4405[0] = 1.0f; float* x4407 = (float*)myMalloc(1 * sizeof(float));; x4407[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4405, x_desc, x2225, x_desc, x2233, x_desc, x2225, x4407, x_desc, x2233)); }; float* x4410 = (float*)myMalloc(1 * sizeof(float));; x4410[0] = 0.0f; float* x4412 = (float*)myMalloc(1 * sizeof(float));; x4412[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4412, x4412, x4412, x4412, in_desc, x2218, out_desc, 
x2233, in_desc, x2224, sbmv_desc, x412, x1146,x1349, 1.0E-5, x2226, x2227)); }; // conv2D back-propagate float* x4416 = (float*)myMalloc(1 * sizeof(float));; x4416[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4416, filt_desc, x691, grad_out_desc, x2224, conv_desc, algo, ws_data, ws_size, x4416, grad_in_desc, x2186)); }; float* x4419 = (float*)myMalloc(1 * sizeof(float));; x4419[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2212, x2212)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
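// Data gradient for the 1x1 reduction convolution (weights x691): propagated back into x2186, the gradient of this bottleneck block's 512-channel input.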
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4419, in_desc, x2178, grad_out_desc, x2224, conv_desc, algo, ws_data, ws_size, x4419, grad_filt_desc, x1239)); }; float* x4422 = (float*)myMalloc(1 * sizeof(float));; x4422[0] = 1.0f; float* x4424 = (float*)myMalloc(1 * sizeof(float));; x4424[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4422, x_desc, x2178, x_desc, x2186, x_desc, x2178, x4424, x_desc, x2186)); }; if (x4428) { if (x4430) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2165) x Sym(2165), res: x Const(64) x Const(512) x Sym(2048) x Sym(2048)"); } float* x4435 = (float*)myMalloc(1 * sizeof(float));; x4435[0] = 1.0f; float* x4437 = (float*)myMalloc(1 * sizeof(float));; x4437[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4435, bias_desc, x2186, x4437, out_desc, x2069)); }; } else { float* x4441 = (float*)myMalloc(1 * sizeof(float));; x4441[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4441, grad_out_desc, x2186, x4441, grad_bias_desc, x2069)); }; } float* x4446 = (float*)myMalloc(1 * sizeof(float));; x4446[0] = 0.0f; float* x4448 = (float*)myMalloc(1 * sizeof(float));; x4448[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4448, x4448, x4448, x4448, in_desc, x2171, out_desc, x2186, in_desc, x2177, sbmv_desc, x796, x1274,x1189, 1.0E-5, x2179, x2180)); }; // conv2D back-propagate float* x4452 = (float*)myMalloc(1 * 
sizeof(float));; x4452[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4452, filt_desc, x418, grad_out_desc, x2177, conv_desc, algo, ws_data, ws_size, x4452, grad_in_desc, x2152)); }; float* x4455 = (float*)myMalloc(1 * sizeof(float));; x4455[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2165, x2165)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
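// Weight gradient for the 1x1 expansion convolution (512x128): accumulated into x1148 by the cudnnConvolutionBackwardFilter call below.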
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4455, in_desc, x2144, grad_out_desc, x2177, conv_desc, algo, ws_data, ws_size, x4455, grad_filt_desc, x1148)); }; float* x4458 = (float*)myMalloc(1 * sizeof(float));; x4458[0] = 1.0f; float* x4460 = (float*)myMalloc(1 * sizeof(float));; x4460[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4458, x_desc, x2144, x_desc, x2152, x_desc, x2144, x4460, x_desc, x2152)); }; float* x4463 = (float*)myMalloc(1 * sizeof(float));; x4463[0] = 0.0f; float* x4465 = (float*)myMalloc(1 * sizeof(float));; x4465[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4465, x4465, x4465, x4465, in_desc, x2137, out_desc, x2152, in_desc, x2143, sbmv_desc, x676, x1234,x1168, 1.0E-5, x2145, x2146)); }; // conv2D back-propagate float* x4469 = (float*)myMalloc(1 * sizeof(float));; x4469[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
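// Data gradient for the 3x3 convolution (weights x868): accumulated into x2116.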
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4469, filt_desc, x868, grad_out_desc, x2143, conv_desc, algo, ws_data, ws_size, x4469, grad_in_desc, x2116)); }; float* x4472 = (float*)myMalloc(1 * sizeof(float));; x4472[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2131, x2131)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4472, in_desc, x2108, grad_out_desc, x2143, conv_desc, algo, ws_data, ws_size, x4472, grad_filt_desc, x1298)); }; float* x4475 = (float*)myMalloc(1 * sizeof(float));; x4475[0] = 1.0f; float* x4477 = (float*)myMalloc(1 * sizeof(float));; x4477[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4475, x_desc, x2108, x_desc, x2116, x_desc, x2108, x4477, x_desc, x2116)); }; float* x4480 = (float*)myMalloc(1 * sizeof(float));; x4480[0] = 0.0f; float* x4482 = (float*)myMalloc(1 * sizeof(float));; x4482[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4482, x4482, x4482, x4482, in_desc, x2101, out_desc, 
x2116, in_desc, x2107, sbmv_desc, x430, x1152,x1277, 1.0E-5, x2109, x2110)); }; // conv2D back-propagate float* x4486 = (float*)myMalloc(1 * sizeof(float));; x4486[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4486, filt_desc, x883, grad_out_desc, x2107, conv_desc, algo, ws_data, ws_size, x4486, grad_in_desc, x2069)); }; float* x4489 = (float*)myMalloc(1 * sizeof(float));; x4489[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2095, x2095)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
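// Weight gradient for the 1x1 reduction convolution (128x512): accumulated into x1303.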
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4489, in_desc, x2061, grad_out_desc, x2107, conv_desc, algo, ws_data, ws_size, x4489, grad_filt_desc, x1303)); }; float* x4492 = (float*)myMalloc(1 * sizeof(float));; x4492[0] = 1.0f; float* x4494 = (float*)myMalloc(1 * sizeof(float));; x4494[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4492, x_desc, x2061, x_desc, x2069, x_desc, x2061, x4494, x_desc, x2069)); }; if (x4498) { if (x4500) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(2048) x Sym(2048), res: x Const(64) x Const(512) x Sym(1905) x Sym(1905)"); } float* x4505 = (float*)myMalloc(1 * sizeof(float));; x4505[0] = 1.0f; float* x4507 = (float*)myMalloc(1 * sizeof(float));; x4507[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4505, bias_desc, x2069, x4507, out_desc, x1926)); }; } else { float* x4511 = (float*)myMalloc(1 * sizeof(float));; x4511[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4511, grad_out_desc, x2069, x4511, grad_bias_desc, x1926)); }; } float* x4516 = (float*)myMalloc(1 * sizeof(float));; x4516[0] = 0.0f; float* x4518 = (float*)myMalloc(1 * sizeof(float));; x4518[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4518, x4518, x4518, x4518, in_desc, x2054, out_desc, x2069, in_desc, x2060, sbmv_desc, x451, x1159,x1353, 1.0E-5, x2062, x2063)); }; // conv2D back-propagate float* x4522 = (float*)myMalloc(1 * 
sizeof(float));; x4522[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4522, filt_desc, x628, grad_out_desc, x2060, conv_desc, algo, ws_data, ws_size, x4522, grad_in_desc, x2035)); }; float* x4525 = (float*)myMalloc(1 * sizeof(float));; x4525[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x2048, x2048)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
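// Weight gradient for the 1x1 expansion convolution (512x128): accumulated into x1218.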
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4525, in_desc, x2027, grad_out_desc, x2060, conv_desc, algo, ws_data, ws_size, x4525, grad_filt_desc, x1218)); }; float* x4528 = (float*)myMalloc(1 * sizeof(float));; x4528[0] = 1.0f; float* x4530 = (float*)myMalloc(1 * sizeof(float));; x4530[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4528, x_desc, x2027, x_desc, x2035, x_desc, x2027, x4530, x_desc, x2035)); }; float* x4533 = (float*)myMalloc(1 * sizeof(float));; x4533[0] = 0.0f; float* x4535 = (float*)myMalloc(1 * sizeof(float));; x4535[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4535, x4535, x4535, x4535, in_desc, x2020, out_desc, x2035, in_desc, x2026, sbmv_desc, x319, x1115,x1202, 1.0E-5, x2028, x2029)); }; // conv2D back-propagate float* x4539 = (float*)myMalloc(1 * sizeof(float));; x4539[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
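// Data gradient for the 3x3 convolution (weights x1000): accumulated into x1999.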
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4539, filt_desc, x1000, grad_out_desc, x2026, conv_desc, algo, ws_data, ws_size, x4539, grad_in_desc, x1999)); }; float* x4542 = (float*)myMalloc(1 * sizeof(float));; x4542[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x2014, x2014)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4542, in_desc, x1991, grad_out_desc, x2026, conv_desc, algo, ws_data, ws_size, x4542, grad_filt_desc, x1342)); }; float* x4545 = (float*)myMalloc(1 * sizeof(float));; x4545[0] = 1.0f; float* x4547 = (float*)myMalloc(1 * sizeof(float));; x4547[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4545, x_desc, x1991, x_desc, x1999, x_desc, x1991, x4547, x_desc, x1999)); }; float* x4550 = (float*)myMalloc(1 * sizeof(float));; x4550[0] = 0.0f; float* x4552 = (float*)myMalloc(1 * sizeof(float));; x4552[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4552, x4552, x4552, x4552, in_desc, x1984, out_desc, 
x1999, in_desc, x1990, sbmv_desc, x961, x1329,x1124, 1.0E-5, x1992, x1993)); }; // conv2D back-propagate float* x4556 = (float*)myMalloc(1 * sizeof(float));; x4556[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4556, filt_desc, x1063, grad_out_desc, x1990, conv_desc, algo, ws_data, ws_size, x4556, grad_in_desc, x1926)); }; float* x4559 = (float*)myMalloc(1 * sizeof(float));; x4559[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 512, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1978, x1978)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
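// Weight gradient for the 1x1 reduction convolution (128x512): accumulated into x1363; ReLU backward and the gradient of this block's residual addition follow.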
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4559, in_desc, x1918, grad_out_desc, x1990, conv_desc, algo, ws_data, ws_size, x4559, grad_filt_desc, x1363)); }; float* x4562 = (float*)myMalloc(1 * sizeof(float));; x4562[0] = 1.0f; float* x4564 = (float*)myMalloc(1 * sizeof(float));; x4564[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4562, x_desc, x1918, x_desc, x1926, x_desc, x1918, x4564, x_desc, x1926)); }; if (x4568) { if (x4570) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(512) x Sym(1905) x Sym(1905), res: x Const(64) x Const(512) x Sym(1931) x Sym(1931)"); } float* x4575 = (float*)myMalloc(1 * sizeof(float));; x4575[0] = 1.0f; float* x4577 = (float*)myMalloc(1 * sizeof(float));; x4577[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4575, bias_desc, x1926, x4577, out_desc, x1952)); }; } else { float* x4581 = (float*)myMalloc(1 * sizeof(float));; x4581[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4581, grad_out_desc, x1926, x4581, grad_bias_desc, x1952)); }; } float* x4586 = (float*)myMalloc(1 * sizeof(float));; x4586[0] = 0.0f; float* x4588 = (float*)myMalloc(1 * sizeof(float));; x4588[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4588, x4588, x4588, x4588, in_desc, x1937, out_desc, x1952, in_desc, x1943, sbmv_desc, x916, x1314,x1226, 1.0E-5, x1945, x1946)); }; // conv2D back-propagate float* x4592 = (float*)myMalloc(1 * 
sizeof(float));; x4592[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4592, filt_desc, x1069, grad_out_desc, x1943, conv_desc, algo, ws_data, ws_size, x4592, grad_in_desc, x1809)); }; float* x4595 = (float*)myMalloc(1 * sizeof(float));; x4595[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1931, x1931)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
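// Weight gradient for the stride-2 1x1 convolution (512x256) mapping the 256-channel stage input to 512 channels (the projection shortcut of this downsampling block): accumulated into x1365.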
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4595, in_desc, x1801, grad_out_desc, x1943, conv_desc, algo, ws_data, ws_size, x4595, grad_filt_desc, x1365)); }; float* x4598 = (float*)myMalloc(1 * sizeof(float));; x4598[0] = 0.0f; float* x4600 = (float*)myMalloc(1 * sizeof(float));; x4600[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 512, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4600, x4600, x4600, x4600, in_desc, x1911, out_desc, x1926, in_desc, x1917, sbmv_desc, x730, x1252,x1317, 1.0E-5, x1919, x1920)); }; // conv2D back-propagate float* x4604 = (float*)myMalloc(1 * sizeof(float));; x4604[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
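// Data gradient for the 1x1 expansion convolution (weights x613): accumulated into x1892.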
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4604, filt_desc, x613, grad_out_desc, x1917, conv_desc, algo, ws_data, ws_size, x4604, grad_in_desc, x1892)); }; float* x4607 = (float*)myMalloc(1 * sizeof(float));; x4607[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 512, 128, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1905, x1905)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4607, in_desc, x1884, grad_out_desc, x1917, conv_desc, algo, ws_data, ws_size, x4607, grad_filt_desc, x1213)); }; float* x4610 = (float*)myMalloc(1 * sizeof(float));; x4610[0] = 1.0f; float* x4612 = (float*)myMalloc(1 * sizeof(float));; x4612[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4610, x_desc, x1884, x_desc, x1892, x_desc, x1884, x4612, x_desc, x1892)); }; float* x4615 = (float*)myMalloc(1 * sizeof(float));; x4615[0] = 0.0f; float* x4617 = (float*)myMalloc(1 * sizeof(float));; x4617[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4617, x4617, x4617, x4617, in_desc, x1877, out_desc, 
x1892, in_desc, x1883, sbmv_desc, x1051, x1359,x1297, 1.0E-5, x1885, x1886)); }; // conv2D back-propagate float* x4621 = (float*)myMalloc(1 * sizeof(float));; x4621[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4621, filt_desc, x376, grad_out_desc, x1883, conv_desc, algo, ws_data, ws_size, x4621, grad_in_desc, x1856)); }; float* x4624 = (float*)myMalloc(1 * sizeof(float));; x4624[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 128, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1871, x1871)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
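// Weight gradient for the stride-2 3x3 convolution (128x128) that performs the spatial downsampling in this block: accumulated into x1134.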
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4624, in_desc, x1848, grad_out_desc, x1883, conv_desc, algo, ws_data, ws_size, x4624, grad_filt_desc, x1134)); }; float* x4627 = (float*)myMalloc(1 * sizeof(float));; x4627[0] = 1.0f; float* x4629 = (float*)myMalloc(1 * sizeof(float));; x4629[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4627, x_desc, x1848, x_desc, x1856, x_desc, x1848, x4629, x_desc, x1856)); }; float* x4632 = (float*)myMalloc(1 * sizeof(float));; x4632[0] = 0.0f; float* x4634 = (float*)myMalloc(1 * sizeof(float));; x4634[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4634, x4634, x4634, x4634, in_desc, x1841, out_desc, x1856, in_desc, x1847, sbmv_desc, x547, x1191,x1279, 1.0E-5, x1849, x1850)); }; // conv2D back-propagate float* x4638 = (float*)myMalloc(1 * sizeof(float));; x4638[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
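// Data gradient for the 1x1 reduction convolution (128x256, weights x328): with beta = 1 it accumulates into x1809, the same 256-channel gradient buffer the stride-2 shortcut above wrote into, so main-path and shortcut gradients are summed.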
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4638, filt_desc, x328, grad_out_desc, x1847, conv_desc, algo, ws_data, ws_size, x4638, grad_in_desc, x1809)); }; float* x4641 = (float*)myMalloc(1 * sizeof(float));; x4641[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x1835, x1835)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4641, in_desc, x1801, grad_out_desc, x1847, conv_desc, algo, ws_data, ws_size, x4641, grad_filt_desc, x1118)); }; float* x4644 = (float*)myMalloc(1 * sizeof(float));; x4644[0] = 1.0f; float* x4646 = (float*)myMalloc(1 * sizeof(float));; x4646[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4644, x_desc, x1801, x_desc, x1809, x_desc, x1801, x4646, x_desc, x1809)); }; if (x4650) { if (x4653) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1788) x Sym(1788), res: x Const(64) x Const(256) x Sym(1671) x Sym(1671)"); } float* x4658 = (float*)myMalloc(1 * sizeof(float));; x4658[0] = 1.0f; float* x4660 = (float*)myMalloc(1 * sizeof(float));; x4660[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4658, bias_desc, x1809, x4660, out_desc, x1692)); }; } else { 
float* x4664 = (float*)myMalloc(1 * sizeof(float));; x4664[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4664, grad_out_desc, x1809, x4664, grad_bias_desc, x1692)); }; } float* x4669 = (float*)myMalloc(1 * sizeof(float));; x4669[0] = 0.0f; float* x4671 = (float*)myMalloc(1 * sizeof(float));; x4671[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4671, x4671, x4671, x4671, in_desc, x1794, out_desc, x1809, in_desc, x1800, sbmv_desc, x406, x1144,x1354, 1.0E-5, x1802, x1803)); }; // conv2D back-propagate float* x4675 = (float*)myMalloc(1 * sizeof(float));; x4675[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
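// Backward-data pass for the 1x1 convolution that expands 64 channels to 256: the 256-channel
// gradient x1800 is mapped back onto the 64-channel gradient buffer x1775.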
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4675, filt_desc, x556, grad_out_desc, x1800, conv_desc, algo, ws_data, ws_size, x4675, grad_in_desc, x1775)); }; float* x4678 = (float*)myMalloc(1 * sizeof(float));; x4678[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1788, x1788)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4678, in_desc, x1767, grad_out_desc, x1800, conv_desc, algo, ws_data, ws_size, x4678, grad_filt_desc, x1194)); }; float* x4681 = (float*)myMalloc(1 * sizeof(float));; x4681[0] = 1.0f; float* x4683 = (float*)myMalloc(1 * sizeof(float));; x4683[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4681, x_desc, x1767, x_desc, x1775, x_desc, x1767, x4683, x_desc, x1775)); }; float* x4686 = (float*)myMalloc(1 * sizeof(float));; x4686[0] = 0.0f; float* x4688 = (float*)myMalloc(1 * sizeof(float));; x4688[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4688, x4688, x4688, x4688, in_desc, x1760, out_desc, x1775, 
in_desc, x1766, sbmv_desc, x511, x1179,x1242, 1.0E-5, x1768, x1769)); }; // conv2D back-propagate float* x4692 = (float*)myMalloc(1 * sizeof(float));; x4692[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4692, filt_desc, x514, grad_out_desc, x1766, conv_desc, algo, ws_data, ws_size, x4692, grad_in_desc, x1739)); }; float* x4695 = (float*)myMalloc(1 * sizeof(float));; x4695[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1754, x1754)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
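// The same sequence repeats for every conv / batch-norm / ReLU stage of the network, traversed in
// reverse: cudnnActivationBackward, then cudnnBatchNormalizationBackward, then the convolution's
// backward-data and backward-filter passes.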
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4695, in_desc, x1731, grad_out_desc, x1766, conv_desc, algo, ws_data, ws_size, x4695, grad_filt_desc, x1180)); }; float* x4698 = (float*)myMalloc(1 * sizeof(float));; x4698[0] = 1.0f; float* x4700 = (float*)myMalloc(1 * sizeof(float));; x4700[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4698, x_desc, x1731, x_desc, x1739, x_desc, x1731, x4700, x_desc, x1739)); }; float* x4703 = (float*)myMalloc(1 * sizeof(float));; x4703[0] = 0.0f; float* x4705 = (float*)myMalloc(1 * sizeof(float));; x4705[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4705, x4705, x4705, x4705, in_desc, x1724, out_desc, x1739, in_desc, x1730, sbmv_desc, x538, x1188,x1131, 1.0E-5, x1732, x1733)); }; // conv2D back-propagate float* x4709 = (float*)myMalloc(1 * sizeof(float));; x4709[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4709, filt_desc, x745, grad_out_desc, x1730, conv_desc, algo, ws_data, ws_size, x4709, grad_in_desc, x1692)); }; float* x4712 = (float*)myMalloc(1 * sizeof(float));; x4712[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1718, x1718)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4712, in_desc, x1684, grad_out_desc, x1730, conv_desc, algo, ws_data, ws_size, x4712, grad_filt_desc, x1257)); }; float* x4715 = (float*)myMalloc(1 * sizeof(float));; x4715[0] = 1.0f; float* x4717 = (float*)myMalloc(1 * sizeof(float));; x4717[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4715, x_desc, x1684, x_desc, x1692, x_desc, x1684, x4717, x_desc, x1692)); }; if (x4721) { if (x4723) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1671) x Sym(1671), res: x Const(64) x Const(256) x Sym(1531) x Sym(1531)"); } float* x4728 = (float*)myMalloc(1 * sizeof(float));; x4728[0] = 1.0f; float* x4730 = (float*)myMalloc(1 * sizeof(float));; x4730[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4728, bias_desc, x1692, x4730, out_desc, x1552)); }; } else { float* 
x4734 = (float*)myMalloc(1 * sizeof(float));; x4734[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4734, grad_out_desc, x1692, x4734, grad_bias_desc, x1552)); }; } float* x4739 = (float*)myMalloc(1 * sizeof(float));; x4739[0] = 0.0f; float* x4741 = (float*)myMalloc(1 * sizeof(float));; x4741[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4741, x4741, x4741, x4741, in_desc, x1677, out_desc, x1692, in_desc, x1683, sbmv_desc, x469, x1165,x1114, 1.0E-5, x1685, x1686)); }; // conv2D back-propagate float* x4745 = (float*)myMalloc(1 * sizeof(float));; x4745[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
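// Note that tensor/filter/convolution descriptors are re-created and the convolution algorithm is
// re-queried for every single cuDNN call in this generated code, and each call requests its own
// workspace via myGpuMalloc; nothing is shared between calls.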
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4745, filt_desc, x685, grad_out_desc, x1683, conv_desc, algo, ws_data, ws_size, x4745, grad_in_desc, x1658)); }; float* x4748 = (float*)myMalloc(1 * sizeof(float));; x4748[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1671, x1671)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4748, in_desc, x1650, grad_out_desc, x1683, conv_desc, algo, ws_data, ws_size, x4748, grad_filt_desc, x1237)); }; float* x4751 = (float*)myMalloc(1 * sizeof(float));; x4751[0] = 1.0f; float* x4753 = (float*)myMalloc(1 * sizeof(float));; x4753[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4751, x_desc, x1650, x_desc, x1658, x_desc, x1650, x4753, x_desc, x1658)); }; float* x4756 = (float*)myMalloc(1 * sizeof(float));; x4756[0] = 0.0f; float* x4758 = (float*)myMalloc(1 * sizeof(float));; x4758[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4758, x4758, x4758, x4758, in_desc, x1643, out_desc, x1658, 
in_desc, x1649, sbmv_desc, x919, x1315,x1260, 1.0E-5, x1651, x1652)); }; // conv2D back-propagate float* x4762 = (float*)myMalloc(1 * sizeof(float));; x4762[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4762, filt_desc, x544, grad_out_desc, x1649, conv_desc, algo, ws_data, ws_size, x4762, grad_in_desc, x1622)); }; float* x4765 = (float*)myMalloc(1 * sizeof(float));; x4765[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1637, x1637)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
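// A possible refactoring of the boilerplate that repeats around every convolution in this
// backward pass. This is only a sketch and is kept commented out so the generated function
// compiles unchanged; it assumes the CUDNN_CALL and myGpuMalloc helpers defined earlier in this
// file and the same cuDNN 7 API that the generated code already uses.
/*
static void convBackwardData(cudnnHandle_t h,
                             const float *filter, int outC, int inC, int kH, int kW,
                             const float *gradOut, int n, int outH, int outW,
                             float *gradIn, int inH, int inW,
                             int pad, int stride) {
  float one = 1.0f;  // alpha = beta = 1.0f: accumulate into gradIn, as the generated code does
  cudnnFilterDescriptor_t fd;
  CUDNN_CALL(cudnnCreateFilterDescriptor(&fd));
  CUDNN_CALL(cudnnSetFilter4dDescriptor(fd, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                                        outC, inC, kH, kW));
  cudnnTensorDescriptor_t gi, go;
  CUDNN_CALL(cudnnCreateTensorDescriptor(&gi));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(gi, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                        n, inC, inH, inW));
  CUDNN_CALL(cudnnCreateTensorDescriptor(&go));
  CUDNN_CALL(cudnnSetTensor4dDescriptor(go, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT,
                                        n, outC, outH, outW));
  cudnnConvolutionDescriptor_t cd;
  CUDNN_CALL(cudnnCreateConvolutionDescriptor(&cd));
  CUDNN_CALL(cudnnSetConvolution2dDescriptor(cd, pad, pad, stride, stride, 1, 1,
                                             CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));
  CUDNN_CALL(cudnnSetConvolutionMathType(cd, CUDNN_TENSOR_OP_MATH));
  // Pick an algorithm and size its workspace, exactly as the generated code does.
  cudnnConvolutionBwdDataAlgo_t algo;
  CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm(
      h, fd, go, cd, gi, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo));
  size_t ws = 0;
  CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize(h, fd, go, cd, gi, algo, &ws));
  void *wsData = myGpuMalloc(ws);
  // Accumulate the data gradient into gradIn (alpha = beta = 1.0f).
  CUDNN_CALL(cudnnConvolutionBackwardData(h, &one, fd, filter, go, gradOut,
                                          cd, algo, wsData, ws, &one, gi, gradIn));
}
*/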
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4765, in_desc, x1614, grad_out_desc, x1649, conv_desc, algo, ws_data, ws_size, x4765, grad_filt_desc, x1190)); }; float* x4768 = (float*)myMalloc(1 * sizeof(float));; x4768[0] = 1.0f; float* x4770 = (float*)myMalloc(1 * sizeof(float));; x4770[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4768, x_desc, x1614, x_desc, x1622, x_desc, x1614, x4770, x_desc, x1622)); }; float* x4773 = (float*)myMalloc(1 * sizeof(float));; x4773[0] = 0.0f; float* x4775 = (float*)myMalloc(1 * sizeof(float));; x4775[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4775, x4775, x4775, x4775, in_desc, x1607, out_desc, x1622, in_desc, x1613, sbmv_desc, x721, x1249,x1167, 1.0E-5, x1615, x1616)); }; // conv2D back-propagate float* x4779 = (float*)myMalloc(1 * sizeof(float));; x4779[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
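// Algorithm selection throughout this file: the data-gradient path keeps whatever
// cudnnGetConvolutionBackwardDataAlgorithm returns under PREFER_FASTEST (the ALGO_1 override is
// commented out, as above), while the filter-gradient path always overrides the query with
// CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1, presumably for reproducible weight gradients.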
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4779, filt_desc, x808, grad_out_desc, x1613, conv_desc, algo, ws_data, ws_size, x4779, grad_in_desc, x1552)); }; float* x4782 = (float*)myMalloc(1 * sizeof(float));; x4782[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 256, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1601, x1601)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4782, in_desc, x1544, grad_out_desc, x1613, conv_desc, algo, ws_data, ws_size, x4782, grad_filt_desc, x1278)); }; float* x4785 = (float*)myMalloc(1 * sizeof(float));; x4785[0] = 1.0f; float* x4787 = (float*)myMalloc(1 * sizeof(float));; x4787[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4785, x_desc, x1544, x_desc, x1552, x_desc, x1544, x4787, x_desc, x1552)); }; if (x4791) { if (x4793) { } else { assert(false && "bias shape should be equal to res or be 1, got bias: x Const(64) x Const(256) x Sym(1531) x Sym(1531), res: x Const(64) x Const(256) x Sym(1461) x Sym(1461)"); } float* x4798 = (float*)myMalloc(1 * sizeof(float));; x4798[0] = 1.0f; float* x4800 = (float*)myMalloc(1 * sizeof(float));; x4800[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x4798, bias_desc, x1552, x4800, out_desc, x1575)); }; } else { float* 
x4804 = (float*)myMalloc(1 * sizeof(float));; x4804[0] = 1.0f; { cudnnTensorDescriptor_t grad_bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); CUDNN_CALL(cudnnConvolutionBackwardBias( cudnnHandle, x4804, grad_out_desc, x1552, x4804, grad_bias_desc, x1575)); }; } float* x4809 = (float*)myMalloc(1 * sizeof(float));; x4809[0] = 0.0f; float* x4811 = (float*)myMalloc(1 * sizeof(float));; x4811[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4811, x4811, x4811, x4811, in_desc, x1560, out_desc, x1575, in_desc, x1566, sbmv_desc, x523, x1183,x1310, 1.0E-5, x1568, x1569)); }; // conv2D back-propagate float* x4815 = (float*)myMalloc(1 * sizeof(float));; x4815[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
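// Backward-data for a 1x1, 64 -> 256 convolution (most likely the projection shortcut of the
// first residual block): its input gradient is written into x1453 with beta = 1.0f. The 1x1,
// 64 -> 64 convolution further below (filter x994) accumulates into the same buffer, so x1453
// ends up holding the sum of the gradients from both branches that split off at the block input.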
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4815, filt_desc, x781, grad_out_desc, x1566, conv_desc, algo, ws_data, ws_size, x4815, grad_in_desc, x1453)); }; float* x4818 = (float*)myMalloc(1 * sizeof(float));; x4818[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1461, x1461)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4818, in_desc, x1451, grad_out_desc, x1566, conv_desc, algo, ws_data, ws_size, x4818, grad_filt_desc, x1269)); }; float* x4821 = (float*)myMalloc(1 * sizeof(float));; x4821[0] = 0.0f; float* x4823 = (float*)myMalloc(1 * sizeof(float));; x4823[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4823, x4823, x4823, x4823, in_desc, x1537, out_desc, x1552, in_desc, x1543, sbmv_desc, x892, x1306,x1233, 1.0E-5, x1545, x1546)); }; // conv2D back-propagate float* x4827 = (float*)myMalloc(1 * sizeof(float));; x4827[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); 
CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4827, filt_desc, x391, grad_out_desc, x1543, conv_desc, algo, ws_data, ws_size, x4827, grad_in_desc, x1518)); }; float* x4830 = (float*)myMalloc(1 * sizeof(float));; x4830[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1531, x1531)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4830, in_desc, x1510, grad_out_desc, x1543, conv_desc, algo, ws_data, ws_size, x4830, grad_filt_desc, x1139)); }; float* x4833 = (float*)myMalloc(1 * sizeof(float));; x4833[0] = 1.0f; float* x4835 = (float*)myMalloc(1 * sizeof(float));; x4835[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4833, x_desc, x1510, x_desc, x1518, x_desc, x1510, x4835, x_desc, x1518)); }; float* x4838 = (float*)myMalloc(1 * sizeof(float));; x4838[0] = 0.0f; float* x4840 = (float*)myMalloc(1 * sizeof(float));; x4840[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4840, x4840, x4840, x4840, in_desc, x1503, out_desc, x1518, in_desc, x1509, sbmv_desc, x787, x1271,x1156, 1.0E-5, x1511, x1512)); }; // conv2D back-propagate float* x4844 = (float*)myMalloc(1 * sizeof(float));; x4844[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. 
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4844, filt_desc, x565, grad_out_desc, x1509, conv_desc, algo, ws_data, ws_size, x4844, grad_in_desc, x1482)); }; float* x4847 = (float*)myMalloc(1 * sizeof(float));; x4847[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1497, x1497)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4847, in_desc, x1474, grad_out_desc, x1509, conv_desc, algo, ws_data, ws_size, x4847, grad_filt_desc, x1197)); }; float* x4850 = (float*)myMalloc(1 * sizeof(float));; x4850[0] = 1.0f; float* x4852 = (float*)myMalloc(1 * sizeof(float));; x4852[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4850, x_desc, x1474, x_desc, x1482, x_desc, x1474, x4852, x_desc, x1482)); }; float* x4855 = (float*)myMalloc(1 * sizeof(float));; x4855[0] = 0.0f; float* x4857 = (float*)myMalloc(1 * sizeof(float));; x4857[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4857, x4857, x4857, x4857, in_desc, x1467, out_desc, x1482, 
in_desc, x1473, sbmv_desc, x373, x1133,x1160, 1.0E-5, x1475, x1476)); }; // conv2D back-propagate float* x4861 = (float*)myMalloc(1 * sizeof(float));; x4861[0] = 1.0f; { cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 1, 1)); cudnnTensorDescriptor_t grad_in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc, grad_out_desc, conv_desc, grad_in_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x4861, filt_desc, x994, grad_out_desc, x1473, conv_desc, algo, ws_data, ws_size, x4861, grad_in_desc, x1453)); }; float* x4864 = (float*)myMalloc(1 * sizeof(float));; x4864[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 64, 1, 1)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1461, x1461)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. 
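// After the 1x1 convolution's filter gradient below, the backward pass finishes up: the gradient
// flows back through the 2x2, stride-2 max pooling (cudnnPoolingBackward), the initial ReLU and
// batch norm, and finally the backward-filter pass of the stem 3x3 convolution over the
// 64x3x32x32 input batch. No backward-data call is emitted for that first convolution, since the
// input images need no gradient.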
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4864, in_desc, x1451, grad_out_desc, x1473, conv_desc, algo, ws_data, ws_size, x4864, grad_filt_desc, x1340)); }; float* x4867 = (float*)myMalloc(1 * sizeof(float));; x4867[0] = 0.0f; float* x4869 = (float*)myMalloc(1 * sizeof(float));; x4869[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1445, x1445)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingBackward( cudnnHandle, poolingDesc, x4869, out_desc, x1451, out_desc, x1453, in_desc, x1425 , x4867, in_desc, x1433)); }; float* x4872 = (float*)myMalloc(1 * sizeof(float));; x4872[0] = 1.0f; float* x4874 = (float*)myMalloc(1 * sizeof(float));; x4874[0] = 0.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationBackward( cudnnHandle, act_desc, x4872, x_desc, x1425, x_desc, x1433, x_desc, x1425, x4874, x_desc, x1433)); }; float* x4877 = (float*)myMalloc(1 * sizeof(float));; x4877[0] = 0.0f; float* x4879 = (float*)myMalloc(1 * sizeof(float));; x4879[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t sbmv_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x4879, x4879, x4879, x4879, in_desc, x1418, out_desc, x1433, in_desc, x1424, sbmv_desc, x913, x1313,x1358, 1.0E-5, x1426, x1427)); }; // conv2D back-propagate float* x4883 = (float*)myMalloc(1 * sizeof(float));; x4883[0] = 1.0f; { cudnnFilterDescriptor_t grad_filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&grad_filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( grad_filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 3, 3, 3)); cudnnTensorDescriptor_t grad_out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&grad_out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( grad_out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1412, x1412)); cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, 
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc, grad_out_desc, conv_desc, grad_filt_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x4883, in_desc, x1402, grad_out_desc, x1424, conv_desc, algo, ws_data, ws_size, x4883, grad_filt_desc, x1259)); }; // Tensor 'toCPU' invocation. float* x4887 = (float*)myMalloc(1 * sizeof(float));; CUDA_CALL(cudaMemcpy(x4887, x1410, 1 * sizeof(float), cudaMemcpyDeviceToHost)); float x4889 = x4887[0]; x1390 += x4889; float* x4891 = (float*)myMalloc(1 * sizeof(float));; x4891[0] = 1.0f; float* x4893 = (float*)myMalloc(1 * sizeof(float));; x4893[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x4891,x313,1024,x4893, x1113, 1024, x313,1024)); arrayFill<<<28, 512>>>(x1113, 0.0f, 262144); float* x4897 = (float*)myMalloc(1 * sizeof(float));; x4897[0] = 1.0f; float* x4899 = (float*)myMalloc(1 * sizeof(float));; x4899[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x4897,x316,1,x4899, x1114, 1, x316,1)); arrayFill<<<28, 512>>>(x1114, 0.0f, 256); float* x4903 = (float*)myMalloc(1 * sizeof(float));; x4903[0] = 1.0f; float* x4905 = (float*)myMalloc(1 * sizeof(float));; x4905[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x4903,x319,1,x4905, x1115, 1, x319,1)); arrayFill<<<28, 512>>>(x1115, 0.0f, 128); float* x4909 = (float*)myMalloc(1 * sizeof(float));; x4909[0] = 1.0f; float* x4911 = (float*)myMalloc(1 * sizeof(float));; x4911[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x4909,x322,1,x4911, x1116, 1, x322,1)); arrayFill<<<28, 512>>>(x1116, 0.0f, 128); float* x4915 = (float*)myMalloc(1 * sizeof(float));; x4915[0] = 1.0f; float* x4917 = (float*)myMalloc(1 * sizeof(float));; x4917[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x4915,x325,1,x4917, x1117, 1, x325,1)); arrayFill<<<28, 512>>>(x1117, 0.0f, 64); float* x4921 = (float*)myMalloc(1 * sizeof(float));; x4921[0] = 1.0f; float* x4923 = (float*)myMalloc(1 * sizeof(float));; x4923[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,128,x4921,x328,256,x4923, x1118, 256, x328,256)); arrayFill<<<28, 512>>>(x1118, 0.0f, 32768); float* x4927 = (float*)myMalloc(1 * sizeof(float));; x4927[0] = 1.0f; float* x4929 = (float*)myMalloc(1 * sizeof(float));; x4929[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4927,x331,1,x4929, x1119, 1, x331,1)); arrayFill<<<28, 512>>>(x1119, 0.0f, 512); float* x4933 = (float*)myMalloc(1 * sizeof(float));; x4933[0] = 1.0f; float* x4935 = (float*)myMalloc(1 * sizeof(float));; x4935[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x4933,x334,1024,x4935, x1120, 1024, x334,1024)); 
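// NOTE: the long run of cublasSgeam / arrayFill pairs in this section is the SGD
// weight update for every parameter tensor of the network. Each cublasSgeam call
// uses alpha = 1.0f and beta = -0.005f, so it computes, in place,
//   param := 1.0f * param + (-0.005f) * grad
// i.e. a plain SGD step with learning rate 0.005; the m, n and leading-dimension
// arguments are just the flattened shape of that parameter. The arrayFill launch
// paired with each update then resets the corresponding gradient buffer to 0.0f
// (its last argument matches the parameter's element count), so gradients can be
// re-accumulated on the next iteration. For example, the first pair in this run
// updates the 1024x256 (262144-element) weight x313 from its gradient x1113:
//   x313 <- x313 - 0.005f * x1113;               // SGD step on the weights
//   arrayFill<<<28, 512>>>(x1113, 0.0f, 262144); // zero the gradient buffer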
arrayFill<<<28, 512>>>(x1120, 0.0f, 262144); float* x4939 = (float*)myMalloc(1 * sizeof(float));; x4939[0] = 1.0f; float* x4941 = (float*)myMalloc(1 * sizeof(float));; x4941[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x4939,x337,2304,x4941, x1121, 2304, x337,2304)); arrayFill<<<28, 512>>>(x1121, 0.0f, 589824); float* x4945 = (float*)myMalloc(1 * sizeof(float));; x4945[0] = 1.0f; float* x4947 = (float*)myMalloc(1 * sizeof(float));; x4947[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4945,x340,1,x4947, x1122, 1, x340,1)); arrayFill<<<28, 512>>>(x1122, 0.0f, 512); float* x4951 = (float*)myMalloc(1 * sizeof(float));; x4951[0] = 1.0f; float* x4953 = (float*)myMalloc(1 * sizeof(float));; x4953[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x4951,x343,1,x4953, x1123, 1, x343,1)); arrayFill<<<28, 512>>>(x1123, 0.0f, 256); float* x4957 = (float*)myMalloc(1 * sizeof(float));; x4957[0] = 1.0f; float* x4959 = (float*)myMalloc(1 * sizeof(float));; x4959[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x4957,x346,1,x4959, x1124, 1, x346,1)); arrayFill<<<28, 512>>>(x1124, 0.0f, 128); float* x4963 = (float*)myMalloc(1 * sizeof(float));; x4963[0] = 1.0f; float* x4965 = (float*)myMalloc(1 * sizeof(float));; x4965[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x4963,x349,1,x4965, x1125, 1, x349,1)); arrayFill<<<28, 512>>>(x1125, 0.0f, 1024); float* x4969 = (float*)myMalloc(1 * sizeof(float));; x4969[0] = 1.0f; float* x4971 = (float*)myMalloc(1 * sizeof(float));; x4971[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4969,x352,1,x4971, x1126, 1, x352,1)); arrayFill<<<28, 512>>>(x1126, 0.0f, 512); float* x4975 = (float*)myMalloc(1 * sizeof(float));; x4975[0] = 1.0f; float* x4977 = (float*)myMalloc(1 * sizeof(float));; x4977[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x4975,x355,1,x4977, x1127, 1, x355,1)); arrayFill<<<28, 512>>>(x1127, 0.0f, 1024); float* x4981 = (float*)myMalloc(1 * sizeof(float));; x4981[0] = 1.0f; float* x4983 = (float*)myMalloc(1 * sizeof(float));; x4983[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x4981,x358,1,x4983, x1128, 1, x358,1)); arrayFill<<<28, 512>>>(x1128, 0.0f, 256); float* x4987 = (float*)myMalloc(1 * sizeof(float));; x4987[0] = 1.0f; float* x4989 = (float*)myMalloc(1 * sizeof(float));; x4989[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x4987,x361,1024,x4989, x1129, 1024, x361,1024)); arrayFill<<<28, 512>>>(x1129, 0.0f, 262144); float* x4993 = (float*)myMalloc(1 * sizeof(float));; x4993[0] = 1.0f; float* x4995 = (float*)myMalloc(1 * sizeof(float));; x4995[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x4993,x364,1,x4995, x1130, 1, x364,1)); arrayFill<<<28, 512>>>(x1130, 0.0f, 512); float* x4999 = (float*)myMalloc(1 * sizeof(float));; x4999[0] = 1.0f; float* x5001 = (float*)myMalloc(1 * sizeof(float));; x5001[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x4999,x367,1,x5001, x1131, 1, x367,1)); arrayFill<<<28, 512>>>(x1131, 0.0f, 64); float* x5005 = (float*)myMalloc(1 * sizeof(float));; x5005[0] = 1.0f; float* x5007 = (float*)myMalloc(1 * sizeof(float));; x5007[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5005,x370,1,x5007, 
x1132, 1, x370,1)); arrayFill<<<28, 512>>>(x1132, 0.0f, 512); float* x5011 = (float*)myMalloc(1 * sizeof(float));; x5011[0] = 1.0f; float* x5013 = (float*)myMalloc(1 * sizeof(float));; x5013[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5011,x373,1,x5013, x1133, 1, x373,1)); arrayFill<<<28, 512>>>(x1133, 0.0f, 64); float* x5017 = (float*)myMalloc(1 * sizeof(float));; x5017[0] = 1.0f; float* x5019 = (float*)myMalloc(1 * sizeof(float));; x5019[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x5017,x376,1152,x5019, x1134, 1152, x376,1152)); arrayFill<<<28, 512>>>(x1134, 0.0f, 147456); float* x5023 = (float*)myMalloc(1 * sizeof(float));; x5023[0] = 1.0f; float* x5025 = (float*)myMalloc(1 * sizeof(float));; x5025[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4608,512,x5023,x379,4608,x5025, x1135, 4608, x379,4608)); arrayFill<<<28, 512>>>(x1135, 0.0f, 2359296); float* x5029 = (float*)myMalloc(1 * sizeof(float));; x5029[0] = 1.0f; float* x5031 = (float*)myMalloc(1 * sizeof(float));; x5031[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5029,x382,1,x5031, x1136, 1, x382,1)); arrayFill<<<28, 512>>>(x1136, 0.0f, 1024); float* x5035 = (float*)myMalloc(1 * sizeof(float));; x5035[0] = 1.0f; float* x5037 = (float*)myMalloc(1 * sizeof(float));; x5037[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5035,x385,1,x5037, x1137, 1, x385,1)); arrayFill<<<28, 512>>>(x1137, 0.0f, 256); float* x5041 = (float*)myMalloc(1 * sizeof(float));; x5041[0] = 1.0f; float* x5043 = (float*)myMalloc(1 * sizeof(float));; x5043[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x5041,x388,2304,x5043, x1138, 2304, x388,2304)); arrayFill<<<28, 512>>>(x1138, 0.0f, 589824); float* x5047 = (float*)myMalloc(1 * sizeof(float));; x5047[0] = 1.0f; float* x5049 = (float*)myMalloc(1 * sizeof(float));; x5049[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5047,x391,64,x5049, x1139, 64, x391,64)); arrayFill<<<28, 512>>>(x1139, 0.0f, 16384); float* x5053 = (float*)myMalloc(1 * sizeof(float));; x5053[0] = 1.0f; float* x5055 = (float*)myMalloc(1 * sizeof(float));; x5055[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,2048,x5053,x394,512,x5055, x1140, 512, x394,512)); arrayFill<<<28, 512>>>(x1140, 0.0f, 1048576); float* x5059 = (float*)myMalloc(1 * sizeof(float));; x5059[0] = 1.0f; float* x5061 = (float*)myMalloc(1 * sizeof(float));; x5061[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4608,512,x5059,x397,4608,x5061, x1141, 4608, x397,4608)); arrayFill<<<28, 512>>>(x1141, 0.0f, 2359296); float* x5065 = (float*)myMalloc(1 * sizeof(float));; x5065[0] = 1.0f; float* x5067 = (float*)myMalloc(1 * sizeof(float));; x5067[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5065,x400,1,x5067, x1142, 1, x400,1)); arrayFill<<<28, 512>>>(x1142, 0.0f, 128); float* x5071 = (float*)myMalloc(1 * sizeof(float));; x5071[0] = 1.0f; float* x5073 = (float*)myMalloc(1 * sizeof(float));; x5073[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5071,x403,1,x5073, x1143, 1, x403,1)); arrayFill<<<28, 512>>>(x1143, 0.0f, 256); float* x5077 = (float*)myMalloc(1 * sizeof(float));; x5077[0] = 1.0f; float* x5079 = (float*)myMalloc(1 * sizeof(float));; x5079[0] = -0.005f; 
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5077,x406,1,x5079, x1144, 1, x406,1)); arrayFill<<<28, 512>>>(x1144, 0.0f, 256); float* x5083 = (float*)myMalloc(1 * sizeof(float));; x5083[0] = 1.0f; float* x5085 = (float*)myMalloc(1 * sizeof(float));; x5085[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5083,x409,1,x5085, x1145, 1, x409,1)); arrayFill<<<28, 512>>>(x1145, 0.0f, 128); float* x5089 = (float*)myMalloc(1 * sizeof(float));; x5089[0] = 1.0f; float* x5091 = (float*)myMalloc(1 * sizeof(float));; x5091[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5089,x412,1,x5091, x1146, 1, x412,1)); arrayFill<<<28, 512>>>(x1146, 0.0f, 128); float* x5095 = (float*)myMalloc(1 * sizeof(float));; x5095[0] = 1.0f; float* x5097 = (float*)myMalloc(1 * sizeof(float));; x5097[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5095,x415,1,x5097, x1147, 1, x415,1)); arrayFill<<<28, 512>>>(x1147, 0.0f, 64); float* x5101 = (float*)myMalloc(1 * sizeof(float));; x5101[0] = 1.0f; float* x5103 = (float*)myMalloc(1 * sizeof(float));; x5103[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5101,x418,128,x5103, x1148, 128, x418,128)); arrayFill<<<28, 512>>>(x1148, 0.0f, 65536); float* x5107 = (float*)myMalloc(1 * sizeof(float));; x5107[0] = 1.0f; float* x5109 = (float*)myMalloc(1 * sizeof(float));; x5109[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5107,x421,1,x5109, x1149, 1, x421,1)); arrayFill<<<28, 512>>>(x1149, 0.0f, 512); float* x5113 = (float*)myMalloc(1 * sizeof(float));; x5113[0] = 1.0f; float* x5115 = (float*)myMalloc(1 * sizeof(float));; x5115[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5113,x424,1,x5115, x1150, 1, x424,1)); arrayFill<<<28, 512>>>(x1150, 0.0f, 128); float* x5119 = (float*)myMalloc(1 * sizeof(float));; x5119[0] = 1.0f; float* x5121 = (float*)myMalloc(1 * sizeof(float));; x5121[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5119,x427,1,x5121, x1151, 1, x427,1)); arrayFill<<<28, 512>>>(x1151, 0.0f, 64); float* x5125 = (float*)myMalloc(1 * sizeof(float));; x5125[0] = 1.0f; float* x5127 = (float*)myMalloc(1 * sizeof(float));; x5127[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5125,x430,1,x5127, x1152, 1, x430,1)); arrayFill<<<28, 512>>>(x1152, 0.0f, 128); float* x5131 = (float*)myMalloc(1 * sizeof(float));; x5131[0] = 1.0f; float* x5133 = (float*)myMalloc(1 * sizeof(float));; x5133[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5131,x433,1,x5133, x1153, 1, x433,1)); arrayFill<<<28, 512>>>(x1153, 0.0f, 512); float* x5137 = (float*)myMalloc(1 * sizeof(float));; x5137[0] = 1.0f; float* x5139 = (float*)myMalloc(1 * sizeof(float));; x5139[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,2048,x5137,x436,512,x5139, x1154, 512, x436,512)); arrayFill<<<28, 512>>>(x1154, 0.0f, 1048576); float* x5143 = (float*)myMalloc(1 * sizeof(float));; x5143[0] = 1.0f; float* x5145 = (float*)myMalloc(1 * sizeof(float));; x5145[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,10,x5143,x439,1,x5145, x1155, 1, x439,1)); arrayFill<<<28, 512>>>(x1155, 0.0f, 10); float* x5149 = (float*)myMalloc(1 * sizeof(float));; x5149[0] = 1.0f; float* x5151 = (float*)myMalloc(1 * sizeof(float));; x5151[0] = 
-0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5149,x442,1,x5151, x1156, 1, x442,1)); arrayFill<<<28, 512>>>(x1156, 0.0f, 64); float* x5155 = (float*)myMalloc(1 * sizeof(float));; x5155[0] = 1.0f; float* x5157 = (float*)myMalloc(1 * sizeof(float));; x5157[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5155,x445,1,x5157, x1157, 1, x445,1)); arrayFill<<<28, 512>>>(x1157, 0.0f, 512); float* x5161 = (float*)myMalloc(1 * sizeof(float));; x5161[0] = 1.0f; float* x5163 = (float*)myMalloc(1 * sizeof(float));; x5163[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5161,x448,1,x5163, x1158, 1, x448,1)); arrayFill<<<28, 512>>>(x1158, 0.0f, 64); float* x5167 = (float*)myMalloc(1 * sizeof(float));; x5167[0] = 1.0f; float* x5169 = (float*)myMalloc(1 * sizeof(float));; x5169[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5167,x451,1,x5169, x1159, 1, x451,1)); arrayFill<<<28, 512>>>(x1159, 0.0f, 512); float* x5173 = (float*)myMalloc(1 * sizeof(float));; x5173[0] = 1.0f; float* x5175 = (float*)myMalloc(1 * sizeof(float));; x5175[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5173,x454,1,x5175, x1160, 1, x454,1)); arrayFill<<<28, 512>>>(x1160, 0.0f, 64); float* x5179 = (float*)myMalloc(1 * sizeof(float));; x5179[0] = 1.0f; float* x5181 = (float*)myMalloc(1 * sizeof(float));; x5181[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5179,x457,1,x5181, x1161, 1, x457,1)); arrayFill<<<28, 512>>>(x1161, 0.0f, 512); float* x5185 = (float*)myMalloc(1 * sizeof(float));; x5185[0] = 1.0f; float* x5187 = (float*)myMalloc(1 * sizeof(float));; x5187[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5185,x460,128,x5187, x1162, 128, x460,128)); arrayFill<<<28, 512>>>(x1162, 0.0f, 65536); float* x5191 = (float*)myMalloc(1 * sizeof(float));; x5191[0] = 1.0f; float* x5193 = (float*)myMalloc(1 * sizeof(float));; x5193[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5191,x463,256,x5193, x1163, 256, x463,256)); arrayFill<<<28, 512>>>(x1163, 0.0f, 262144); float* x5197 = (float*)myMalloc(1 * sizeof(float));; x5197[0] = 1.0f; float* x5199 = (float*)myMalloc(1 * sizeof(float));; x5199[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5197,x466,1,x5199, x1164, 1, x466,1)); arrayFill<<<28, 512>>>(x1164, 0.0f, 1024); float* x5203 = (float*)myMalloc(1 * sizeof(float));; x5203[0] = 1.0f; float* x5205 = (float*)myMalloc(1 * sizeof(float));; x5205[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5203,x469,1,x5205, x1165, 1, x469,1)); arrayFill<<<28, 512>>>(x1165, 0.0f, 256); float* x5209 = (float*)myMalloc(1 * sizeof(float));; x5209[0] = 1.0f; float* x5211 = (float*)myMalloc(1 * sizeof(float));; x5211[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5209,x472,1,x5211, x1166, 1, x472,1)); arrayFill<<<28, 512>>>(x1166, 0.0f, 1024); float* x5215 = (float*)myMalloc(1 * sizeof(float));; x5215[0] = 1.0f; float* x5217 = (float*)myMalloc(1 * sizeof(float));; x5217[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5215,x475,1,x5217, x1167, 1, x475,1)); arrayFill<<<28, 512>>>(x1167, 0.0f, 64); float* x5221 = (float*)myMalloc(1 * sizeof(float));; x5221[0] = 1.0f; float* x5223 = (float*)myMalloc(1 * sizeof(float));; 
x5223[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5221,x478,1,x5223, x1168, 1, x478,1)); arrayFill<<<28, 512>>>(x1168, 0.0f, 128); float* x5227 = (float*)myMalloc(1 * sizeof(float));; x5227[0] = 1.0f; float* x5229 = (float*)myMalloc(1 * sizeof(float));; x5229[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5227,x481,1,x5229, x1169, 1, x481,1)); arrayFill<<<28, 512>>>(x1169, 0.0f, 2048); float* x5233 = (float*)myMalloc(1 * sizeof(float));; x5233[0] = 1.0f; float* x5235 = (float*)myMalloc(1 * sizeof(float));; x5235[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5233,x484,1,x5235, x1170, 1, x484,1)); arrayFill<<<28, 512>>>(x1170, 0.0f, 256); float* x5239 = (float*)myMalloc(1 * sizeof(float));; x5239[0] = 1.0f; float* x5241 = (float*)myMalloc(1 * sizeof(float));; x5241[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5239,x487,1,x5241, x1171, 1, x487,1)); arrayFill<<<28, 512>>>(x1171, 0.0f, 2048); float* x5245 = (float*)myMalloc(1 * sizeof(float));; x5245[0] = 1.0f; float* x5247 = (float*)myMalloc(1 * sizeof(float));; x5247[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5245,x490,1,x5247, x1172, 1, x490,1)); arrayFill<<<28, 512>>>(x1172, 0.0f, 512); float* x5251 = (float*)myMalloc(1 * sizeof(float));; x5251[0] = 1.0f; float* x5253 = (float*)myMalloc(1 * sizeof(float));; x5253[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5251,x493,1,x5253, x1173, 1, x493,1)); arrayFill<<<28, 512>>>(x1173, 0.0f, 512); float* x5257 = (float*)myMalloc(1 * sizeof(float));; x5257[0] = 1.0f; float* x5259 = (float*)myMalloc(1 * sizeof(float));; x5259[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5257,x496,1,x5259, x1174, 1, x496,1)); arrayFill<<<28, 512>>>(x1174, 0.0f, 512); float* x5263 = (float*)myMalloc(1 * sizeof(float));; x5263[0] = 1.0f; float* x5265 = (float*)myMalloc(1 * sizeof(float));; x5265[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5263,x499,1,x5265, x1175, 1, x499,1)); arrayFill<<<28, 512>>>(x1175, 0.0f, 2048); float* x5269 = (float*)myMalloc(1 * sizeof(float));; x5269[0] = 1.0f; float* x5271 = (float*)myMalloc(1 * sizeof(float));; x5271[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5269,x502,1,x5271, x1176, 1, x502,1)); arrayFill<<<28, 512>>>(x1176, 0.0f, 256); float* x5275 = (float*)myMalloc(1 * sizeof(float));; x5275[0] = 1.0f; float* x5277 = (float*)myMalloc(1 * sizeof(float));; x5277[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5275,x505,1,x5277, x1177, 1, x505,1)); arrayFill<<<28, 512>>>(x1177, 0.0f, 256); float* x5281 = (float*)myMalloc(1 * sizeof(float));; x5281[0] = 1.0f; float* x5283 = (float*)myMalloc(1 * sizeof(float));; x5283[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5281,x508,1,x5283, x1178, 1, x508,1)); arrayFill<<<28, 512>>>(x1178, 0.0f, 256); float* x5287 = (float*)myMalloc(1 * sizeof(float));; x5287[0] = 1.0f; float* x5289 = (float*)myMalloc(1 * sizeof(float));; x5289[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5287,x511,1,x5289, x1179, 1, x511,1)); arrayFill<<<28, 512>>>(x1179, 0.0f, 64); float* x5293 = (float*)myMalloc(1 * sizeof(float));; x5293[0] = 1.0f; float* x5295 = (float*)myMalloc(1 * sizeof(float));; x5295[0] = 
-0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,64,x5293,x514,576,x5295, x1180, 576, x514,576)); arrayFill<<<28, 512>>>(x1180, 0.0f, 36864); float* x5299 = (float*)myMalloc(1 * sizeof(float));; x5299[0] = 1.0f; float* x5301 = (float*)myMalloc(1 * sizeof(float));; x5301[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5299,x517,1,x5301, x1181, 1, x517,1)); arrayFill<<<28, 512>>>(x1181, 0.0f, 256); float* x5305 = (float*)myMalloc(1 * sizeof(float));; x5305[0] = 1.0f; float* x5307 = (float*)myMalloc(1 * sizeof(float));; x5307[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,1024,x5305,x520,512,x5307, x1182, 512, x520,512)); arrayFill<<<28, 512>>>(x1182, 0.0f, 524288); float* x5311 = (float*)myMalloc(1 * sizeof(float));; x5311[0] = 1.0f; float* x5313 = (float*)myMalloc(1 * sizeof(float));; x5313[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5311,x523,1,x5313, x1183, 1, x523,1)); arrayFill<<<28, 512>>>(x1183, 0.0f, 256); float* x5317 = (float*)myMalloc(1 * sizeof(float));; x5317[0] = 1.0f; float* x5319 = (float*)myMalloc(1 * sizeof(float));; x5319[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5317,x526,1,x5319, x1184, 1, x526,1)); arrayFill<<<28, 512>>>(x1184, 0.0f, 256); float* x5323 = (float*)myMalloc(1 * sizeof(float));; x5323[0] = 1.0f; float* x5325 = (float*)myMalloc(1 * sizeof(float));; x5325[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5323,x529,1,x5325, x1185, 1, x529,1)); arrayFill<<<28, 512>>>(x1185, 0.0f, 512); float* x5329 = (float*)myMalloc(1 * sizeof(float));; x5329[0] = 1.0f; float* x5331 = (float*)myMalloc(1 * sizeof(float));; x5331[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5329,x532,1,x5331, x1186, 1, x532,1)); arrayFill<<<28, 512>>>(x1186, 0.0f, 128); float* x5335 = (float*)myMalloc(1 * sizeof(float));; x5335[0] = 1.0f; float* x5337 = (float*)myMalloc(1 * sizeof(float));; x5337[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5335,x535,1,x5337, x1187, 1, x535,1)); arrayFill<<<28, 512>>>(x1187, 0.0f, 256); float* x5341 = (float*)myMalloc(1 * sizeof(float));; x5341[0] = 1.0f; float* x5343 = (float*)myMalloc(1 * sizeof(float));; x5343[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5341,x538,1,x5343, x1188, 1, x538,1)); arrayFill<<<28, 512>>>(x1188, 0.0f, 64); float* x5347 = (float*)myMalloc(1 * sizeof(float));; x5347[0] = 1.0f; float* x5349 = (float*)myMalloc(1 * sizeof(float));; x5349[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5347,x541,1,x5349, x1189, 1, x541,1)); arrayFill<<<28, 512>>>(x1189, 0.0f, 512); float* x5353 = (float*)myMalloc(1 * sizeof(float));; x5353[0] = 1.0f; float* x5355 = (float*)myMalloc(1 * sizeof(float));; x5355[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,64,x5353,x544,576,x5355, x1190, 576, x544,576)); arrayFill<<<28, 512>>>(x1190, 0.0f, 36864); float* x5359 = (float*)myMalloc(1 * sizeof(float));; x5359[0] = 1.0f; float* x5361 = (float*)myMalloc(1 * sizeof(float));; x5361[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5359,x547,1,x5361, x1191, 1, x547,1)); arrayFill<<<28, 512>>>(x1191, 0.0f, 128); float* x5365 = (float*)myMalloc(1 * sizeof(float));; x5365[0] = 1.0f; float* x5367 = (float*)myMalloc(1 * 
sizeof(float));; x5367[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5365,x550,1,x5367, x1192, 1, x550,1)); arrayFill<<<28, 512>>>(x1192, 0.0f, 256); float* x5371 = (float*)myMalloc(1 * sizeof(float));; x5371[0] = 1.0f; float* x5373 = (float*)myMalloc(1 * sizeof(float));; x5373[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5371,x553,1,x5373, x1193, 1, x553,1)); arrayFill<<<28, 512>>>(x1193, 0.0f, 1024); float* x5377 = (float*)myMalloc(1 * sizeof(float));; x5377[0] = 1.0f; float* x5379 = (float*)myMalloc(1 * sizeof(float));; x5379[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5377,x556,64,x5379, x1194, 64, x556,64)); arrayFill<<<28, 512>>>(x1194, 0.0f, 16384); float* x5383 = (float*)myMalloc(1 * sizeof(float));; x5383[0] = 1.0f; float* x5385 = (float*)myMalloc(1 * sizeof(float));; x5385[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5383,x559,1,x5385, x1195, 1, x559,1)); arrayFill<<<28, 512>>>(x1195, 0.0f, 512); float* x5389 = (float*)myMalloc(1 * sizeof(float));; x5389[0] = 1.0f; float* x5391 = (float*)myMalloc(1 * sizeof(float));; x5391[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5389,x562,256,x5391, x1196, 256, x562,256)); arrayFill<<<28, 512>>>(x1196, 0.0f, 262144); float* x5395 = (float*)myMalloc(1 * sizeof(float));; x5395[0] = 1.0f; float* x5397 = (float*)myMalloc(1 * sizeof(float));; x5397[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 576,64,x5395,x565,576,x5397, x1197, 576, x565,576)); arrayFill<<<28, 512>>>(x1197, 0.0f, 36864); float* x5401 = (float*)myMalloc(1 * sizeof(float));; x5401[0] = 1.0f; float* x5403 = (float*)myMalloc(1 * sizeof(float));; x5403[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5401,x568,1,x5403, x1198, 1, x568,1)); arrayFill<<<28, 512>>>(x1198, 0.0f, 256); float* x5407 = (float*)myMalloc(1 * sizeof(float));; x5407[0] = 1.0f; float* x5409 = (float*)myMalloc(1 * sizeof(float));; x5409[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5407,x571,1,x5409, x1199, 1, x571,1)); arrayFill<<<28, 512>>>(x1199, 0.0f, 256); float* x5413 = (float*)myMalloc(1 * sizeof(float));; x5413[0] = 1.0f; float* x5415 = (float*)myMalloc(1 * sizeof(float));; x5415[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5413,x574,1,x5415, x1200, 1, x574,1)); arrayFill<<<28, 512>>>(x1200, 0.0f, 1024); float* x5419 = (float*)myMalloc(1 * sizeof(float));; x5419[0] = 1.0f; float* x5421 = (float*)myMalloc(1 * sizeof(float));; x5421[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5419,x577,1,x5421, x1201, 1, x577,1)); arrayFill<<<28, 512>>>(x1201, 0.0f, 2048); float* x5425 = (float*)myMalloc(1 * sizeof(float));; x5425[0] = 1.0f; float* x5427 = (float*)myMalloc(1 * sizeof(float));; x5427[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5425,x580,1,x5427, x1202, 1, x580,1)); arrayFill<<<28, 512>>>(x1202, 0.0f, 128); float* x5431 = (float*)myMalloc(1 * sizeof(float));; x5431[0] = 1.0f; float* x5433 = (float*)myMalloc(1 * sizeof(float));; x5433[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5431,x583,1,x5433, x1203, 1, x583,1)); arrayFill<<<28, 512>>>(x1203, 0.0f, 256); float* x5437 = (float*)myMalloc(1 * sizeof(float));; x5437[0] = 1.0f; float* x5439 = 
(float*)myMalloc(1 * sizeof(float));; x5439[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5437,x586,256,x5439, x1204, 256, x586,256)); arrayFill<<<28, 512>>>(x1204, 0.0f, 262144); float* x5443 = (float*)myMalloc(1 * sizeof(float));; x5443[0] = 1.0f; float* x5445 = (float*)myMalloc(1 * sizeof(float));; x5445[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5443,x589,1,x5445, x1205, 1, x589,1)); arrayFill<<<28, 512>>>(x1205, 0.0f, 256); float* x5449 = (float*)myMalloc(1 * sizeof(float));; x5449[0] = 1.0f; float* x5451 = (float*)myMalloc(1 * sizeof(float));; x5451[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5449,x592,1,x5451, x1206, 1, x592,1)); arrayFill<<<28, 512>>>(x1206, 0.0f, 256); float* x5455 = (float*)myMalloc(1 * sizeof(float));; x5455[0] = 1.0f; float* x5457 = (float*)myMalloc(1 * sizeof(float));; x5457[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5455,x595,1,x5457, x1207, 1, x595,1)); arrayFill<<<28, 512>>>(x1207, 0.0f, 128); float* x5461 = (float*)myMalloc(1 * sizeof(float));; x5461[0] = 1.0f; float* x5463 = (float*)myMalloc(1 * sizeof(float));; x5463[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5461,x598,1,x5463, x1208, 1, x598,1)); arrayFill<<<28, 512>>>(x1208, 0.0f, 512); float* x5467 = (float*)myMalloc(1 * sizeof(float));; x5467[0] = 1.0f; float* x5469 = (float*)myMalloc(1 * sizeof(float));; x5469[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5467,x601,1,x5469, x1209, 1, x601,1)); arrayFill<<<28, 512>>>(x1209, 0.0f, 64); float* x5473 = (float*)myMalloc(1 * sizeof(float));; x5473[0] = 1.0f; float* x5475 = (float*)myMalloc(1 * sizeof(float));; x5475[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5473,x604,1,x5475, x1210, 1, x604,1)); arrayFill<<<28, 512>>>(x1210, 0.0f, 2048); float* x5479 = (float*)myMalloc(1 * sizeof(float));; x5479[0] = 1.0f; float* x5481 = (float*)myMalloc(1 * sizeof(float));; x5481[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5479,x607,1,x5481, x1211, 1, x607,1)); arrayFill<<<28, 512>>>(x1211, 0.0f, 256); float* x5485 = (float*)myMalloc(1 * sizeof(float));; x5485[0] = 1.0f; float* x5487 = (float*)myMalloc(1 * sizeof(float));; x5487[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5485,x610,1,x5487, x1212, 1, x610,1)); arrayFill<<<28, 512>>>(x1212, 0.0f, 64); float* x5491 = (float*)myMalloc(1 * sizeof(float));; x5491[0] = 1.0f; float* x5493 = (float*)myMalloc(1 * sizeof(float));; x5493[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5491,x613,128,x5493, x1213, 128, x613,128)); arrayFill<<<28, 512>>>(x1213, 0.0f, 65536); float* x5497 = (float*)myMalloc(1 * sizeof(float));; x5497[0] = 1.0f; float* x5499 = (float*)myMalloc(1 * sizeof(float));; x5499[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5497,x616,1,x5499, x1214, 1, x616,1)); arrayFill<<<28, 512>>>(x1214, 0.0f, 2048); float* x5503 = (float*)myMalloc(1 * sizeof(float));; x5503[0] = 1.0f; float* x5505 = (float*)myMalloc(1 * sizeof(float));; x5505[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5503,x619,1,x5505, x1215, 1, x619,1)); arrayFill<<<28, 512>>>(x1215, 0.0f, 256); float* x5509 = (float*)myMalloc(1 * sizeof(float));; x5509[0] = 1.0f; float* 
x5511 = (float*)myMalloc(1 * sizeof(float));; x5511[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5509,x622,1,x5511, x1216, 1, x622,1)); arrayFill<<<28, 512>>>(x1216, 0.0f, 256); float* x5515 = (float*)myMalloc(1 * sizeof(float));; x5515[0] = 1.0f; float* x5517 = (float*)myMalloc(1 * sizeof(float));; x5517[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5515,x625,1,x5517, x1217, 1, x625,1)); arrayFill<<<28, 512>>>(x1217, 0.0f, 64); float* x5521 = (float*)myMalloc(1 * sizeof(float));; x5521[0] = 1.0f; float* x5523 = (float*)myMalloc(1 * sizeof(float));; x5523[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 128,512,x5521,x628,128,x5523, x1218, 128, x628,128)); arrayFill<<<28, 512>>>(x1218, 0.0f, 65536); float* x5527 = (float*)myMalloc(1 * sizeof(float));; x5527[0] = 1.0f; float* x5529 = (float*)myMalloc(1 * sizeof(float));; x5529[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5527,x631,1,x5529, x1219, 1, x631,1)); arrayFill<<<28, 512>>>(x1219, 0.0f, 128); float* x5533 = (float*)myMalloc(1 * sizeof(float));; x5533[0] = 1.0f; float* x5535 = (float*)myMalloc(1 * sizeof(float));; x5535[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5533,x634,1,x5535, x1220, 1, x634,1)); arrayFill<<<28, 512>>>(x1220, 0.0f, 512); float* x5539 = (float*)myMalloc(1 * sizeof(float));; x5539[0] = 1.0f; float* x5541 = (float*)myMalloc(1 * sizeof(float));; x5541[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5539,x637,1,x5541, x1221, 1, x637,1)); arrayFill<<<28, 512>>>(x1221, 0.0f, 64); float* x5545 = (float*)myMalloc(1 * sizeof(float));; x5545[0] = 1.0f; float* x5547 = (float*)myMalloc(1 * sizeof(float));; x5547[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5545,x640,1,x5547, x1222, 1, x640,1)); arrayFill<<<28, 512>>>(x1222, 0.0f, 2048); float* x5551 = (float*)myMalloc(1 * sizeof(float));; x5551[0] = 1.0f; float* x5553 = (float*)myMalloc(1 * sizeof(float));; x5553[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x5551,x643,256,x5553, x1223, 256, x643,256)); arrayFill<<<28, 512>>>(x1223, 0.0f, 262144); float* x5557 = (float*)myMalloc(1 * sizeof(float));; x5557[0] = 1.0f; float* x5559 = (float*)myMalloc(1 * sizeof(float));; x5559[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5557,x646,1,x5559, x1224, 1, x646,1)); arrayFill<<<28, 512>>>(x1224, 0.0f, 1024); float* x5563 = (float*)myMalloc(1 * sizeof(float));; x5563[0] = 1.0f; float* x5565 = (float*)myMalloc(1 * sizeof(float));; x5565[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5563,x649,1,x5565, x1225, 1, x649,1)); arrayFill<<<28, 512>>>(x1225, 0.0f, 64); float* x5569 = (float*)myMalloc(1 * sizeof(float));; x5569[0] = 1.0f; float* x5571 = (float*)myMalloc(1 * sizeof(float));; x5571[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5569,x652,1,x5571, x1226, 1, x652,1)); arrayFill<<<28, 512>>>(x1226, 0.0f, 512); float* x5575 = (float*)myMalloc(1 * sizeof(float));; x5575[0] = 1.0f; float* x5577 = (float*)myMalloc(1 * sizeof(float));; x5577[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5575,x655,1,x5577, x1227, 1, x655,1)); arrayFill<<<28, 512>>>(x1227, 0.0f, 1024); float* x5581 = (float*)myMalloc(1 * sizeof(float));; x5581[0] = 1.0f; 
float* x5583 = (float*)myMalloc(1 * sizeof(float));; x5583[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5581,x658,1,x5583, x1228, 1, x658,1)); arrayFill<<<28, 512>>>(x1228, 0.0f, 512); float* x5587 = (float*)myMalloc(1 * sizeof(float));; x5587[0] = 1.0f; float* x5589 = (float*)myMalloc(1 * sizeof(float));; x5589[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5587,x661,1,x5589, x1229, 1, x661,1)); arrayFill<<<28, 512>>>(x1229, 0.0f, 1024); float* x5593 = (float*)myMalloc(1 * sizeof(float));; x5593[0] = 1.0f; float* x5595 = (float*)myMalloc(1 * sizeof(float));; x5595[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5593,x664,1,x5595, x1230, 1, x664,1)); arrayFill<<<28, 512>>>(x1230, 0.0f, 2048); float* x5599 = (float*)myMalloc(1 * sizeof(float));; x5599[0] = 1.0f; float* x5601 = (float*)myMalloc(1 * sizeof(float));; x5601[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5599,x667,1,x5601, x1231, 1, x667,1)); arrayFill<<<28, 512>>>(x1231, 0.0f, 256); float* x5605 = (float*)myMalloc(1 * sizeof(float));; x5605[0] = 1.0f; float* x5607 = (float*)myMalloc(1 * sizeof(float));; x5607[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5605,x670,1,x5607, x1232, 1, x670,1)); arrayFill<<<28, 512>>>(x1232, 0.0f, 2048); float* x5611 = (float*)myMalloc(1 * sizeof(float));; x5611[0] = 1.0f; float* x5613 = (float*)myMalloc(1 * sizeof(float));; x5613[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5611,x673,1,x5613, x1233, 1, x673,1)); arrayFill<<<28, 512>>>(x1233, 0.0f, 256); float* x5617 = (float*)myMalloc(1 * sizeof(float));; x5617[0] = 1.0f; float* x5619 = (float*)myMalloc(1 * sizeof(float));; x5619[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5617,x676,1,x5619, x1234, 1, x676,1)); arrayFill<<<28, 512>>>(x1234, 0.0f, 128); float* x5623 = (float*)myMalloc(1 * sizeof(float));; x5623[0] = 1.0f; float* x5625 = (float*)myMalloc(1 * sizeof(float));; x5625[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5623,x679,1,x5625, x1235, 1, x679,1)); arrayFill<<<28, 512>>>(x1235, 0.0f, 128); float* x5629 = (float*)myMalloc(1 * sizeof(float));; x5629[0] = 1.0f; float* x5631 = (float*)myMalloc(1 * sizeof(float));; x5631[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5629,x682,1,x5631, x1236, 1, x682,1)); arrayFill<<<28, 512>>>(x1236, 0.0f, 256); float* x5635 = (float*)myMalloc(1 * sizeof(float));; x5635[0] = 1.0f; float* x5637 = (float*)myMalloc(1 * sizeof(float));; x5637[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5635,x685,64,x5637, x1237, 64, x685,64)); arrayFill<<<28, 512>>>(x1237, 0.0f, 16384); float* x5641 = (float*)myMalloc(1 * sizeof(float));; x5641[0] = 1.0f; float* x5643 = (float*)myMalloc(1 * sizeof(float));; x5643[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5641,x688,1,x5643, x1238, 1, x688,1)); arrayFill<<<28, 512>>>(x1238, 0.0f, 256); float* x5647 = (float*)myMalloc(1 * sizeof(float));; x5647[0] = 1.0f; float* x5649 = (float*)myMalloc(1 * sizeof(float));; x5649[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,128,x5647,x691,512,x5649, x1239, 512, x691,512)); arrayFill<<<28, 512>>>(x1239, 0.0f, 65536); float* x5653 = (float*)myMalloc(1 * sizeof(float));; x5653[0] 
= 1.0f; float* x5655 = (float*)myMalloc(1 * sizeof(float));; x5655[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5653,x694,1,x5655, x1240, 1, x694,1)); arrayFill<<<28, 512>>>(x1240, 0.0f, 256); float* x5659 = (float*)myMalloc(1 * sizeof(float));; x5659[0] = 1.0f; float* x5661 = (float*)myMalloc(1 * sizeof(float));; x5661[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5659,x697,1,x5661, x1241, 1, x697,1)); arrayFill<<<28, 512>>>(x1241, 0.0f, 128); float* x5665 = (float*)myMalloc(1 * sizeof(float));; x5665[0] = 1.0f; float* x5667 = (float*)myMalloc(1 * sizeof(float));; x5667[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5665,x700,1,x5667, x1242, 1, x700,1)); arrayFill<<<28, 512>>>(x1242, 0.0f, 64); float* x5671 = (float*)myMalloc(1 * sizeof(float));; x5671[0] = 1.0f; float* x5673 = (float*)myMalloc(1 * sizeof(float));; x5673[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5671,x703,1,x5673, x1243, 1, x703,1)); arrayFill<<<28, 512>>>(x1243, 0.0f, 256); float* x5677 = (float*)myMalloc(1 * sizeof(float));; x5677[0] = 1.0f; float* x5679 = (float*)myMalloc(1 * sizeof(float));; x5679[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5677,x706,1,x5679, x1244, 1, x706,1)); arrayFill<<<28, 512>>>(x1244, 0.0f, 512); float* x5683 = (float*)myMalloc(1 * sizeof(float));; x5683[0] = 1.0f; float* x5685 = (float*)myMalloc(1 * sizeof(float));; x5685[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5683,x709,1,x5685, x1245, 1, x709,1)); arrayFill<<<28, 512>>>(x1245, 0.0f, 512); float* x5689 = (float*)myMalloc(1 * sizeof(float));; x5689[0] = 1.0f; float* x5691 = (float*)myMalloc(1 * sizeof(float));; x5691[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,512,x5689,x712,1024,x5691, x1246, 1024, x712,1024)); arrayFill<<<28, 512>>>(x1246, 0.0f, 524288); float* x5695 = (float*)myMalloc(1 * sizeof(float));; x5695[0] = 1.0f; float* x5697 = (float*)myMalloc(1 * sizeof(float));; x5697[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5695,x715,1,x5697, x1247, 1, x715,1)); arrayFill<<<28, 512>>>(x1247, 0.0f, 1024); float* x5701 = (float*)myMalloc(1 * sizeof(float));; x5701[0] = 1.0f; float* x5703 = (float*)myMalloc(1 * sizeof(float));; x5703[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5701,x718,1,x5703, x1248, 1, x718,1)); arrayFill<<<28, 512>>>(x1248, 0.0f, 256); float* x5707 = (float*)myMalloc(1 * sizeof(float));; x5707[0] = 1.0f; float* x5709 = (float*)myMalloc(1 * sizeof(float));; x5709[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5707,x721,1,x5709, x1249, 1, x721,1)); arrayFill<<<28, 512>>>(x1249, 0.0f, 64); float* x5713 = (float*)myMalloc(1 * sizeof(float));; x5713[0] = 1.0f; float* x5715 = (float*)myMalloc(1 * sizeof(float));; x5715[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5713,x724,1,x5715, x1250, 1, x724,1)); arrayFill<<<28, 512>>>(x1250, 0.0f, 1024); float* x5719 = (float*)myMalloc(1 * sizeof(float));; x5719[0] = 1.0f; float* x5721 = (float*)myMalloc(1 * sizeof(float));; x5721[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5719,x727,1,x5721, x1251, 1, x727,1)); arrayFill<<<28, 512>>>(x1251, 0.0f, 2048); float* x5725 = (float*)myMalloc(1 * sizeof(float));; 
x5725[0] = 1.0f; float* x5727 = (float*)myMalloc(1 * sizeof(float));; x5727[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5725,x730,1,x5727, x1252, 1, x730,1)); arrayFill<<<28, 512>>>(x1252, 0.0f, 512); float* x5731 = (float*)myMalloc(1 * sizeof(float));; x5731[0] = 1.0f; float* x5733 = (float*)myMalloc(1 * sizeof(float));; x5733[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5731,x733,1,x5733, x1253, 1, x733,1)); arrayFill<<<28, 512>>>(x1253, 0.0f, 1024); float* x5737 = (float*)myMalloc(1 * sizeof(float));; x5737[0] = 1.0f; float* x5739 = (float*)myMalloc(1 * sizeof(float));; x5739[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5737,x736,1,x5739, x1254, 1, x736,1)); arrayFill<<<28, 512>>>(x1254, 0.0f, 512); float* x5743 = (float*)myMalloc(1 * sizeof(float));; x5743[0] = 1.0f; float* x5745 = (float*)myMalloc(1 * sizeof(float));; x5745[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5743,x739,1,x5745, x1255, 1, x739,1)); arrayFill<<<28, 512>>>(x1255, 0.0f, 128); float* x5749 = (float*)myMalloc(1 * sizeof(float));; x5749[0] = 1.0f; float* x5751 = (float*)myMalloc(1 * sizeof(float));; x5751[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5749,x742,1,x5751, x1256, 1, x742,1)); arrayFill<<<28, 512>>>(x1256, 0.0f, 512); float* x5755 = (float*)myMalloc(1 * sizeof(float));; x5755[0] = 1.0f; float* x5757 = (float*)myMalloc(1 * sizeof(float));; x5757[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,64,x5755,x745,256,x5757, x1257, 256, x745,256)); arrayFill<<<28, 512>>>(x1257, 0.0f, 16384); float* x5761 = (float*)myMalloc(1 * sizeof(float));; x5761[0] = 1.0f; float* x5763 = (float*)myMalloc(1 * sizeof(float));; x5763[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x5761,x748,1024,x5763, x1258, 1024, x748,1024)); arrayFill<<<28, 512>>>(x1258, 0.0f, 262144); float* x5767 = (float*)myMalloc(1 * sizeof(float));; x5767[0] = 1.0f; float* x5769 = (float*)myMalloc(1 * sizeof(float));; x5769[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 27,64,x5767,x751,27,x5769, x1259, 27, x751,27)); arrayFill<<<28, 512>>>(x1259, 0.0f, 1728); float* x5773 = (float*)myMalloc(1 * sizeof(float));; x5773[0] = 1.0f; float* x5775 = (float*)myMalloc(1 * sizeof(float));; x5775[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5773,x754,1,x5775, x1260, 1, x754,1)); arrayFill<<<28, 512>>>(x1260, 0.0f, 64); float* x5779 = (float*)myMalloc(1 * sizeof(float));; x5779[0] = 1.0f; float* x5781 = (float*)myMalloc(1 * sizeof(float));; x5781[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5779,x757,1,x5781, x1261, 1, x757,1)); arrayFill<<<28, 512>>>(x1261, 0.0f, 512); float* x5785 = (float*)myMalloc(1 * sizeof(float));; x5785[0] = 1.0f; float* x5787 = (float*)myMalloc(1 * sizeof(float));; x5787[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 4608,512,x5785,x760,4608,x5787, x1262, 4608, x760,4608)); arrayFill<<<28, 512>>>(x1262, 0.0f, 2359296); float* x5791 = (float*)myMalloc(1 * sizeof(float));; x5791[0] = 1.0f; float* x5793 = (float*)myMalloc(1 * sizeof(float));; x5793[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5791,x763,1,x5793, x1263, 1, x763,1)); arrayFill<<<28, 512>>>(x1263, 0.0f, 512); float* x5797 = 
(float*)myMalloc(1 * sizeof(float));; x5797[0] = 1.0f; float* x5799 = (float*)myMalloc(1 * sizeof(float));; x5799[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5797,x766,1,x5799, x1264, 1, x766,1)); arrayFill<<<28, 512>>>(x1264, 0.0f, 256); float* x5803 = (float*)myMalloc(1 * sizeof(float));; x5803[0] = 1.0f; float* x5805 = (float*)myMalloc(1 * sizeof(float));; x5805[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5803,x769,1,x5805, x1265, 1, x769,1)); arrayFill<<<28, 512>>>(x1265, 0.0f, 64); float* x5809 = (float*)myMalloc(1 * sizeof(float));; x5809[0] = 1.0f; float* x5811 = (float*)myMalloc(1 * sizeof(float));; x5811[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5809,x772,1,x5811, x1266, 1, x772,1)); arrayFill<<<28, 512>>>(x1266, 0.0f, 512); float* x5815 = (float*)myMalloc(1 * sizeof(float));; x5815[0] = 1.0f; float* x5817 = (float*)myMalloc(1 * sizeof(float));; x5817[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5815,x775,1,x5817, x1267, 1, x775,1)); arrayFill<<<28, 512>>>(x1267, 0.0f, 512); float* x5821 = (float*)myMalloc(1 * sizeof(float));; x5821[0] = 1.0f; float* x5823 = (float*)myMalloc(1 * sizeof(float));; x5823[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5821,x778,1,x5823, x1268, 1, x778,1)); arrayFill<<<28, 512>>>(x1268, 0.0f, 1024); float* x5827 = (float*)myMalloc(1 * sizeof(float));; x5827[0] = 1.0f; float* x5829 = (float*)myMalloc(1 * sizeof(float));; x5829[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,256,x5827,x781,64,x5829, x1269, 64, x781,64)); arrayFill<<<28, 512>>>(x1269, 0.0f, 16384); float* x5833 = (float*)myMalloc(1 * sizeof(float));; x5833[0] = 1.0f; float* x5835 = (float*)myMalloc(1 * sizeof(float));; x5835[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5833,x784,1,x5835, x1270, 1, x784,1)); arrayFill<<<28, 512>>>(x1270, 0.0f, 256); float* x5839 = (float*)myMalloc(1 * sizeof(float));; x5839[0] = 1.0f; float* x5841 = (float*)myMalloc(1 * sizeof(float));; x5841[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5839,x787,1,x5841, x1271, 1, x787,1)); arrayFill<<<28, 512>>>(x1271, 0.0f, 64); float* x5845 = (float*)myMalloc(1 * sizeof(float));; x5845[0] = 1.0f; float* x5847 = (float*)myMalloc(1 * sizeof(float));; x5847[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x5845,x790,1152,x5847, x1272, 1152, x790,1152)); arrayFill<<<28, 512>>>(x1272, 0.0f, 147456); float* x5851 = (float*)myMalloc(1 * sizeof(float));; x5851[0] = 1.0f; float* x5853 = (float*)myMalloc(1 * sizeof(float));; x5853[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5851,x793,1,x5853, x1273, 1, x793,1)); arrayFill<<<28, 512>>>(x1273, 0.0f, 256); float* x5857 = (float*)myMalloc(1 * sizeof(float));; x5857[0] = 1.0f; float* x5859 = (float*)myMalloc(1 * sizeof(float));; x5859[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5857,x796,1,x5859, x1274, 1, x796,1)); arrayFill<<<28, 512>>>(x1274, 0.0f, 512); float* x5863 = (float*)myMalloc(1 * sizeof(float));; x5863[0] = 1.0f; float* x5865 = (float*)myMalloc(1 * sizeof(float));; x5865[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5863,x799,1,x5865, x1275, 1, x799,1)); arrayFill<<<28, 512>>>(x1275, 0.0f, 256); float* 
x5869 = (float*)myMalloc(1 * sizeof(float));; x5869[0] = 1.0f; float* x5871 = (float*)myMalloc(1 * sizeof(float));; x5871[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x5869,x802,1,x5871, x1276, 1, x802,1)); arrayFill<<<28, 512>>>(x1276, 0.0f, 512); float* x5875 = (float*)myMalloc(1 * sizeof(float));; x5875[0] = 1.0f; float* x5877 = (float*)myMalloc(1 * sizeof(float));; x5877[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5875,x805,1,x5877, x1277, 1, x805,1)); arrayFill<<<28, 512>>>(x1277, 0.0f, 128); float* x5881 = (float*)myMalloc(1 * sizeof(float));; x5881[0] = 1.0f; float* x5883 = (float*)myMalloc(1 * sizeof(float));; x5883[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,64,x5881,x808,256,x5883, x1278, 256, x808,256)); arrayFill<<<28, 512>>>(x1278, 0.0f, 16384); float* x5887 = (float*)myMalloc(1 * sizeof(float));; x5887[0] = 1.0f; float* x5889 = (float*)myMalloc(1 * sizeof(float));; x5889[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5887,x811,1,x5889, x1279, 1, x811,1)); arrayFill<<<28, 512>>>(x1279, 0.0f, 128); float* x5893 = (float*)myMalloc(1 * sizeof(float));; x5893[0] = 1.0f; float* x5895 = (float*)myMalloc(1 * sizeof(float));; x5895[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5893,x814,1,x5895, x1280, 1, x814,1)); arrayFill<<<28, 512>>>(x1280, 0.0f, 2048); float* x5899 = (float*)myMalloc(1 * sizeof(float));; x5899[0] = 1.0f; float* x5901 = (float*)myMalloc(1 * sizeof(float));; x5901[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5899,x817,1,x5901, x1281, 1, x817,1)); arrayFill<<<28, 512>>>(x1281, 0.0f, 256); float* x5905 = (float*)myMalloc(1 * sizeof(float));; x5905[0] = 1.0f; float* x5907 = (float*)myMalloc(1 * sizeof(float));; x5907[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x5905,x820,2304,x5907, x1282, 2304, x820,2304)); arrayFill<<<28, 512>>>(x1282, 0.0f, 589824); float* x5911 = (float*)myMalloc(1 * sizeof(float));; x5911[0] = 1.0f; float* x5913 = (float*)myMalloc(1 * sizeof(float));; x5913[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5911,x823,1,x5913, x1283, 1, x823,1)); arrayFill<<<28, 512>>>(x1283, 0.0f, 256); float* x5917 = (float*)myMalloc(1 * sizeof(float));; x5917[0] = 1.0f; float* x5919 = (float*)myMalloc(1 * sizeof(float));; x5919[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5917,x826,1,x5919, x1284, 1, x826,1)); arrayFill<<<28, 512>>>(x1284, 0.0f, 128); float* x5923 = (float*)myMalloc(1 * sizeof(float));; x5923[0] = 1.0f; float* x5925 = (float*)myMalloc(1 * sizeof(float));; x5925[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5923,x829,1,x5925, x1285, 1, x829,1)); arrayFill<<<28, 512>>>(x1285, 0.0f, 256); float* x5929 = (float*)myMalloc(1 * sizeof(float));; x5929[0] = 1.0f; float* x5931 = (float*)myMalloc(1 * sizeof(float));; x5931[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5929,x832,1,x5931, x1286, 1, x832,1)); arrayFill<<<28, 512>>>(x1286, 0.0f, 64); float* x5935 = (float*)myMalloc(1 * sizeof(float));; x5935[0] = 1.0f; float* x5937 = (float*)myMalloc(1 * sizeof(float));; x5937[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,256,x5935,x835,512,x5937, x1287, 512, x835,512)); arrayFill<<<28, 512>>>(x1287, 
0.0f, 131072); float* x5941 = (float*)myMalloc(1 * sizeof(float));; x5941[0] = 1.0f; float* x5943 = (float*)myMalloc(1 * sizeof(float));; x5943[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x5941,x838,1,x5943, x1288, 1, x838,1)); arrayFill<<<28, 512>>>(x1288, 0.0f, 2048); float* x5947 = (float*)myMalloc(1 * sizeof(float));; x5947[0] = 1.0f; float* x5949 = (float*)myMalloc(1 * sizeof(float));; x5949[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5947,x841,1,x5949, x1289, 1, x841,1)); arrayFill<<<28, 512>>>(x1289, 0.0f, 1024); float* x5953 = (float*)myMalloc(1 * sizeof(float));; x5953[0] = 1.0f; float* x5955 = (float*)myMalloc(1 * sizeof(float));; x5955[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5953,x844,1,x5955, x1290, 1, x844,1)); arrayFill<<<28, 512>>>(x1290, 0.0f, 1024); float* x5959 = (float*)myMalloc(1 * sizeof(float));; x5959[0] = 1.0f; float* x5961 = (float*)myMalloc(1 * sizeof(float));; x5961[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5959,x847,1,x5961, x1291, 1, x847,1)); arrayFill<<<28, 512>>>(x1291, 0.0f, 256); float* x5965 = (float*)myMalloc(1 * sizeof(float));; x5965[0] = 1.0f; float* x5967 = (float*)myMalloc(1 * sizeof(float));; x5967[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5965,x850,1,x5967, x1292, 1, x850,1)); arrayFill<<<28, 512>>>(x1292, 0.0f, 256); float* x5971 = (float*)myMalloc(1 * sizeof(float));; x5971[0] = 1.0f; float* x5973 = (float*)myMalloc(1 * sizeof(float));; x5973[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5971,x853,1,x5973, x1293, 1, x853,1)); arrayFill<<<28, 512>>>(x1293, 0.0f, 256); float* x5977 = (float*)myMalloc(1 * sizeof(float));; x5977[0] = 1.0f; float* x5979 = (float*)myMalloc(1 * sizeof(float));; x5979[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x5977,x856,1,x5979, x1294, 1, x856,1)); arrayFill<<<28, 512>>>(x1294, 0.0f, 64); float* x5983 = (float*)myMalloc(1 * sizeof(float));; x5983[0] = 1.0f; float* x5985 = (float*)myMalloc(1 * sizeof(float));; x5985[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x5983,x859,1,x5985, x1295, 1, x859,1)); arrayFill<<<28, 512>>>(x1295, 0.0f, 1024); float* x5989 = (float*)myMalloc(1 * sizeof(float));; x5989[0] = 1.0f; float* x5991 = (float*)myMalloc(1 * sizeof(float));; x5991[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x5989,x862,1,x5991, x1296, 1, x862,1)); arrayFill<<<28, 512>>>(x1296, 0.0f, 256); float* x5995 = (float*)myMalloc(1 * sizeof(float));; x5995[0] = 1.0f; float* x5997 = (float*)myMalloc(1 * sizeof(float));; x5997[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x5995,x865,1,x5997, x1297, 1, x865,1)); arrayFill<<<28, 512>>>(x1297, 0.0f, 128); float* x6001 = (float*)myMalloc(1 * sizeof(float));; x6001[0] = 1.0f; float* x6003 = (float*)myMalloc(1 * sizeof(float));; x6003[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x6001,x868,1152,x6003, x1298, 1152, x868,1152)); arrayFill<<<28, 512>>>(x1298, 0.0f, 147456); float* x6007 = (float*)myMalloc(1 * sizeof(float));; x6007[0] = 1.0f; float* x6009 = (float*)myMalloc(1 * sizeof(float));; x6009[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6007,x871,1,x6009, x1299, 1, x871,1)); arrayFill<<<28, 
512>>>(x1299, 0.0f, 256); float* x6013 = (float*)myMalloc(1 * sizeof(float));; x6013[0] = 1.0f; float* x6015 = (float*)myMalloc(1 * sizeof(float));; x6015[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6013,x874,1,x6015, x1300, 1, x874,1)); arrayFill<<<28, 512>>>(x1300, 0.0f, 2048); float* x6019 = (float*)myMalloc(1 * sizeof(float));; x6019[0] = 1.0f; float* x6021 = (float*)myMalloc(1 * sizeof(float));; x6021[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6019,x877,1,x6021, x1301, 1, x877,1)); arrayFill<<<28, 512>>>(x1301, 0.0f, 512); float* x6025 = (float*)myMalloc(1 * sizeof(float));; x6025[0] = 1.0f; float* x6027 = (float*)myMalloc(1 * sizeof(float));; x6027[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6025,x880,1,x6027, x1302, 1, x880,1)); arrayFill<<<28, 512>>>(x1302, 0.0f, 512); float* x6031 = (float*)myMalloc(1 * sizeof(float));; x6031[0] = 1.0f; float* x6033 = (float*)myMalloc(1 * sizeof(float));; x6033[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,128,x6031,x883,512,x6033, x1303, 512, x883,512)); arrayFill<<<28, 512>>>(x1303, 0.0f, 65536); float* x6037 = (float*)myMalloc(1 * sizeof(float));; x6037[0] = 1.0f; float* x6039 = (float*)myMalloc(1 * sizeof(float));; x6039[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6037,x886,1,x6039, x1304, 1, x886,1)); arrayFill<<<28, 512>>>(x1304, 0.0f, 256); float* x6043 = (float*)myMalloc(1 * sizeof(float));; x6043[0] = 1.0f; float* x6045 = (float*)myMalloc(1 * sizeof(float));; x6045[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6043,x889,1,x6045, x1305, 1, x889,1)); arrayFill<<<28, 512>>>(x1305, 0.0f, 256); float* x6049 = (float*)myMalloc(1 * sizeof(float));; x6049[0] = 1.0f; float* x6051 = (float*)myMalloc(1 * sizeof(float));; x6051[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6049,x892,1,x6051, x1306, 1, x892,1)); arrayFill<<<28, 512>>>(x1306, 0.0f, 256); float* x6055 = (float*)myMalloc(1 * sizeof(float));; x6055[0] = 1.0f; float* x6057 = (float*)myMalloc(1 * sizeof(float));; x6057[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6055,x895,1,x6057, x1307, 1, x895,1)); arrayFill<<<28, 512>>>(x1307, 0.0f, 256); float* x6061 = (float*)myMalloc(1 * sizeof(float));; x6061[0] = 1.0f; float* x6063 = (float*)myMalloc(1 * sizeof(float));; x6063[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6061,x898,1,x6063, x1308, 1, x898,1)); arrayFill<<<28, 512>>>(x1308, 0.0f, 512); float* x6067 = (float*)myMalloc(1 * sizeof(float));; x6067[0] = 1.0f; float* x6069 = (float*)myMalloc(1 * sizeof(float));; x6069[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6067,x901,1,x6069, x1309, 1, x901,1)); arrayFill<<<28, 512>>>(x1309, 0.0f, 512); float* x6073 = (float*)myMalloc(1 * sizeof(float));; x6073[0] = 1.0f; float* x6075 = (float*)myMalloc(1 * sizeof(float));; x6075[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6073,x904,1,x6075, x1310, 1, x904,1)); arrayFill<<<28, 512>>>(x1310, 0.0f, 256); float* x6079 = (float*)myMalloc(1 * sizeof(float));; x6079[0] = 1.0f; float* x6081 = (float*)myMalloc(1 * sizeof(float));; x6081[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6079,x907,1,x6081, x1311, 1, x907,1)); arrayFill<<<28, 
512>>>(x1311, 0.0f, 128); float* x6085 = (float*)myMalloc(1 * sizeof(float));; x6085[0] = 1.0f; float* x6087 = (float*)myMalloc(1 * sizeof(float));; x6087[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6085,x910,1,x6087, x1312, 1, x910,1)); arrayFill<<<28, 512>>>(x1312, 0.0f, 512); float* x6091 = (float*)myMalloc(1 * sizeof(float));; x6091[0] = 1.0f; float* x6093 = (float*)myMalloc(1 * sizeof(float));; x6093[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6091,x913,1,x6093, x1313, 1, x913,1)); arrayFill<<<28, 512>>>(x1313, 0.0f, 64); float* x6097 = (float*)myMalloc(1 * sizeof(float));; x6097[0] = 1.0f; float* x6099 = (float*)myMalloc(1 * sizeof(float));; x6099[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6097,x916,1,x6099, x1314, 1, x916,1)); arrayFill<<<28, 512>>>(x1314, 0.0f, 512); float* x6103 = (float*)myMalloc(1 * sizeof(float));; x6103[0] = 1.0f; float* x6105 = (float*)myMalloc(1 * sizeof(float));; x6105[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6103,x919,1,x6105, x1315, 1, x919,1)); arrayFill<<<28, 512>>>(x1315, 0.0f, 64); float* x6109 = (float*)myMalloc(1 * sizeof(float));; x6109[0] = 1.0f; float* x6111 = (float*)myMalloc(1 * sizeof(float));; x6111[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6109,x922,1,x6111, x1316, 1, x922,1)); arrayFill<<<28, 512>>>(x1316, 0.0f, 1024); float* x6115 = (float*)myMalloc(1 * sizeof(float));; x6115[0] = 1.0f; float* x6117 = (float*)myMalloc(1 * sizeof(float));; x6117[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6115,x925,1,x6117, x1317, 1, x925,1)); arrayFill<<<28, 512>>>(x1317, 0.0f, 512); float* x6121 = (float*)myMalloc(1 * sizeof(float));; x6121[0] = 1.0f; float* x6123 = (float*)myMalloc(1 * sizeof(float));; x6123[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6121,x928,1,x6123, x1318, 1, x928,1)); arrayFill<<<28, 512>>>(x1318, 0.0f, 1024); float* x6127 = (float*)myMalloc(1 * sizeof(float));; x6127[0] = 1.0f; float* x6129 = (float*)myMalloc(1 * sizeof(float));; x6129[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,2048,x6127,x931,512,x6129, x1319, 512, x931,512)); arrayFill<<<28, 512>>>(x1319, 0.0f, 1048576); float* x6133 = (float*)myMalloc(1 * sizeof(float));; x6133[0] = 1.0f; float* x6135 = (float*)myMalloc(1 * sizeof(float));; x6135[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6133,x934,1,x6135, x1320, 1, x934,1)); arrayFill<<<28, 512>>>(x1320, 0.0f, 512); float* x6139 = (float*)myMalloc(1 * sizeof(float));; x6139[0] = 1.0f; float* x6141 = (float*)myMalloc(1 * sizeof(float));; x6141[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,2048,x6139,x937,1024,x6141, x1321, 1024, x937,1024)); arrayFill<<<28, 512>>>(x1321, 0.0f, 2097152); float* x6145 = (float*)myMalloc(1 * sizeof(float));; x6145[0] = 1.0f; float* x6147 = (float*)myMalloc(1 * sizeof(float));; x6147[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2048,512,x6145,x940,2048,x6147, x1322, 2048, x940,2048)); arrayFill<<<28, 512>>>(x1322, 0.0f, 1048576); float* x6151 = (float*)myMalloc(1 * sizeof(float));; x6151[0] = 1.0f; float* x6153 = (float*)myMalloc(1 * sizeof(float));; x6153[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6151,x943,1,x6153, 
x1323, 1, x943,1)); arrayFill<<<28, 512>>>(x1323, 0.0f, 1024); float* x6157 = (float*)myMalloc(1 * sizeof(float));; x6157[0] = 1.0f; float* x6159 = (float*)myMalloc(1 * sizeof(float));; x6159[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6157,x946,1,x6159, x1324, 1, x946,1)); arrayFill<<<28, 512>>>(x1324, 0.0f, 128); float* x6163 = (float*)myMalloc(1 * sizeof(float));; x6163[0] = 1.0f; float* x6165 = (float*)myMalloc(1 * sizeof(float));; x6165[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1024,256,x6163,x949,1024,x6165, x1325, 1024, x949,1024)); arrayFill<<<28, 512>>>(x1325, 0.0f, 262144); float* x6169 = (float*)myMalloc(1 * sizeof(float));; x6169[0] = 1.0f; float* x6171 = (float*)myMalloc(1 * sizeof(float));; x6171[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6169,x952,1,x6171, x1326, 1, x952,1)); arrayFill<<<28, 512>>>(x1326, 0.0f, 256); float* x6175 = (float*)myMalloc(1 * sizeof(float));; x6175[0] = 1.0f; float* x6177 = (float*)myMalloc(1 * sizeof(float));; x6177[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6175,x955,1,x6177, x1327, 1, x955,1)); arrayFill<<<28, 512>>>(x1327, 0.0f, 1024); float* x6181 = (float*)myMalloc(1 * sizeof(float));; x6181[0] = 1.0f; float* x6183 = (float*)myMalloc(1 * sizeof(float));; x6183[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x6181,x958,256,x6183, x1328, 256, x958,256)); arrayFill<<<28, 512>>>(x1328, 0.0f, 262144); float* x6187 = (float*)myMalloc(1 * sizeof(float));; x6187[0] = 1.0f; float* x6189 = (float*)myMalloc(1 * sizeof(float));; x6189[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6187,x961,1,x6189, x1329, 1, x961,1)); arrayFill<<<28, 512>>>(x1329, 0.0f, 128); float* x6193 = (float*)myMalloc(1 * sizeof(float));; x6193[0] = 1.0f; float* x6195 = (float*)myMalloc(1 * sizeof(float));; x6195[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6193,x964,1,x6195, x1330, 1, x964,1)); arrayFill<<<28, 512>>>(x1330, 0.0f, 512); float* x6199 = (float*)myMalloc(1 * sizeof(float));; x6199[0] = 1.0f; float* x6201 = (float*)myMalloc(1 * sizeof(float));; x6201[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6199,x967,1,x6201, x1331, 1, x967,1)); arrayFill<<<28, 512>>>(x1331, 0.0f, 512); float* x6205 = (float*)myMalloc(1 * sizeof(float));; x6205[0] = 1.0f; float* x6207 = (float*)myMalloc(1 * sizeof(float));; x6207[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6205,x970,1,x6207, x1332, 1, x970,1)); arrayFill<<<28, 512>>>(x1332, 0.0f, 128); float* x6211 = (float*)myMalloc(1 * sizeof(float));; x6211[0] = 1.0f; float* x6213 = (float*)myMalloc(1 * sizeof(float));; x6213[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x6211,x973,2304,x6213, x1333, 2304, x973,2304)); arrayFill<<<28, 512>>>(x1333, 0.0f, 589824); float* x6217 = (float*)myMalloc(1 * sizeof(float));; x6217[0] = 1.0f; float* x6219 = (float*)myMalloc(1 * sizeof(float));; x6219[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2048,10,x6217,x976,2048,x6219, x1334, 2048, x976,2048)); arrayFill<<<28, 512>>>(x1334, 0.0f, 20480); float* x6223 = (float*)myMalloc(1 * sizeof(float));; x6223[0] = 1.0f; float* x6225 = (float*)myMalloc(1 * sizeof(float));; x6225[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, 
CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6223,x979,1,x6225, x1335, 1, x979,1)); arrayFill<<<28, 512>>>(x1335, 0.0f, 256); float* x6229 = (float*)myMalloc(1 * sizeof(float));; x6229[0] = 1.0f; float* x6231 = (float*)myMalloc(1 * sizeof(float));; x6231[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6229,x982,1,x6231, x1336, 1, x982,1)); arrayFill<<<28, 512>>>(x1336, 0.0f, 256); float* x6235 = (float*)myMalloc(1 * sizeof(float));; x6235[0] = 1.0f; float* x6237 = (float*)myMalloc(1 * sizeof(float));; x6237[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6235,x985,1,x6237, x1337, 1, x985,1)); arrayFill<<<28, 512>>>(x1337, 0.0f, 256); float* x6241 = (float*)myMalloc(1 * sizeof(float));; x6241[0] = 1.0f; float* x6243 = (float*)myMalloc(1 * sizeof(float));; x6243[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6241,x988,1,x6243, x1338, 1, x988,1)); arrayFill<<<28, 512>>>(x1338, 0.0f, 1024); float* x6247 = (float*)myMalloc(1 * sizeof(float));; x6247[0] = 1.0f; float* x6249 = (float*)myMalloc(1 * sizeof(float));; x6249[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6247,x991,1,x6249, x1339, 1, x991,1)); arrayFill<<<28, 512>>>(x1339, 0.0f, 1024); float* x6253 = (float*)myMalloc(1 * sizeof(float));; x6253[0] = 1.0f; float* x6255 = (float*)myMalloc(1 * sizeof(float));; x6255[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 64,64,x6253,x994,64,x6255, x1340, 64, x994,64)); arrayFill<<<28, 512>>>(x1340, 0.0f, 4096); float* x6259 = (float*)myMalloc(1 * sizeof(float));; x6259[0] = 1.0f; float* x6261 = (float*)myMalloc(1 * sizeof(float));; x6261[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6259,x997,1,x6261, x1341, 1, x997,1)); arrayFill<<<28, 512>>>(x1341, 0.0f, 512); float* x6265 = (float*)myMalloc(1 * sizeof(float));; x6265[0] = 1.0f; float* x6267 = (float*)myMalloc(1 * sizeof(float));; x6267[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1152,128,x6265,x1000,1152,x6267, x1342, 1152, x1000,1152)); arrayFill<<<28, 512>>>(x1342, 0.0f, 147456); float* x6271 = (float*)myMalloc(1 * sizeof(float));; x6271[0] = 1.0f; float* x6273 = (float*)myMalloc(1 * sizeof(float));; x6273[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6271,x1003,1,x6273, x1343, 1, x1003,1)); arrayFill<<<28, 512>>>(x1343, 0.0f, 128); float* x6277 = (float*)myMalloc(1 * sizeof(float));; x6277[0] = 1.0f; float* x6279 = (float*)myMalloc(1 * sizeof(float));; x6279[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6277,x1006,1,x6279, x1344, 1, x1006,1)); arrayFill<<<28, 512>>>(x1344, 0.0f, 256); float* x6283 = (float*)myMalloc(1 * sizeof(float));; x6283[0] = 1.0f; float* x6285 = (float*)myMalloc(1 * sizeof(float));; x6285[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6283,x1009,1,x6285, x1345, 1, x1009,1)); arrayFill<<<28, 512>>>(x1345, 0.0f, 1024); float* x6289 = (float*)myMalloc(1 * sizeof(float));; x6289[0] = 1.0f; float* x6291 = (float*)myMalloc(1 * sizeof(float));; x6291[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6289,x1012,1,x6291, x1346, 1, x1012,1)); arrayFill<<<28, 512>>>(x1346, 0.0f, 2048); float* x6295 = (float*)myMalloc(1 * sizeof(float));; x6295[0] = 1.0f; float* x6297 = (float*)myMalloc(1 * sizeof(float));; x6297[0] = -0.005f; 
CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6295,x1015,1,x6297, x1347, 1, x1015,1)); arrayFill<<<28, 512>>>(x1347, 0.0f, 256); float* x6301 = (float*)myMalloc(1 * sizeof(float));; x6301[0] = 1.0f; float* x6303 = (float*)myMalloc(1 * sizeof(float));; x6303[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6301,x1018,1,x6303, x1348, 1, x1018,1)); arrayFill<<<28, 512>>>(x1348, 0.0f, 256); float* x6307 = (float*)myMalloc(1 * sizeof(float));; x6307[0] = 1.0f; float* x6309 = (float*)myMalloc(1 * sizeof(float));; x6309[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6307,x1021,1,x6309, x1349, 1, x1021,1)); arrayFill<<<28, 512>>>(x1349, 0.0f, 128); float* x6313 = (float*)myMalloc(1 * sizeof(float));; x6313[0] = 1.0f; float* x6315 = (float*)myMalloc(1 * sizeof(float));; x6315[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6313,x1024,1,x6315, x1350, 1, x1024,1)); arrayFill<<<28, 512>>>(x1350, 0.0f, 256); float* x6319 = (float*)myMalloc(1 * sizeof(float));; x6319[0] = 1.0f; float* x6321 = (float*)myMalloc(1 * sizeof(float));; x6321[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6319,x1027,1,x6321, x1351, 1, x1027,1)); arrayFill<<<28, 512>>>(x1351, 0.0f, 64); float* x6325 = (float*)myMalloc(1 * sizeof(float));; x6325[0] = 1.0f; float* x6327 = (float*)myMalloc(1 * sizeof(float));; x6327[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6325,x1030,1,x6327, x1352, 1, x1030,1)); arrayFill<<<28, 512>>>(x1352, 0.0f, 2048); float* x6331 = (float*)myMalloc(1 * sizeof(float));; x6331[0] = 1.0f; float* x6333 = (float*)myMalloc(1 * sizeof(float));; x6333[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6331,x1033,1,x6333, x1353, 1, x1033,1)); arrayFill<<<28, 512>>>(x1353, 0.0f, 512); float* x6337 = (float*)myMalloc(1 * sizeof(float));; x6337[0] = 1.0f; float* x6339 = (float*)myMalloc(1 * sizeof(float));; x6339[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6337,x1036,1,x6339, x1354, 1, x1036,1)); arrayFill<<<28, 512>>>(x1354, 0.0f, 256); float* x6343 = (float*)myMalloc(1 * sizeof(float));; x6343[0] = 1.0f; float* x6345 = (float*)myMalloc(1 * sizeof(float));; x6345[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6343,x1039,1,x6345, x1355, 1, x1039,1)); arrayFill<<<28, 512>>>(x1355, 0.0f, 1024); float* x6349 = (float*)myMalloc(1 * sizeof(float));; x6349[0] = 1.0f; float* x6351 = (float*)myMalloc(1 * sizeof(float));; x6351[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x6349,x1042,2304,x6351, x1356, 2304, x1042,2304)); arrayFill<<<28, 512>>>(x1356, 0.0f, 589824); float* x6355 = (float*)myMalloc(1 * sizeof(float));; x6355[0] = 1.0f; float* x6357 = (float*)myMalloc(1 * sizeof(float));; x6357[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6355,x1045,1,x6357, x1357, 1, x1045,1)); arrayFill<<<28, 512>>>(x1357, 0.0f, 256); float* x6361 = (float*)myMalloc(1 * sizeof(float));; x6361[0] = 1.0f; float* x6363 = (float*)myMalloc(1 * sizeof(float));; x6363[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6361,x1048,1,x6363, x1358, 1, x1048,1)); arrayFill<<<28, 512>>>(x1358, 0.0f, 64); float* x6367 = (float*)myMalloc(1 * sizeof(float));; x6367[0] = 1.0f; float* x6369 = (float*)myMalloc(1 * 
sizeof(float));; x6369[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6367,x1051,1,x6369, x1359, 1, x1051,1)); arrayFill<<<28, 512>>>(x1359, 0.0f, 128); float* x6373 = (float*)myMalloc(1 * sizeof(float));; x6373[0] = 1.0f; float* x6375 = (float*)myMalloc(1 * sizeof(float));; x6375[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6373,x1054,1,x6375, x1360, 1, x1054,1)); arrayFill<<<28, 512>>>(x1360, 0.0f, 256); float* x6379 = (float*)myMalloc(1 * sizeof(float));; x6379[0] = 1.0f; float* x6381 = (float*)myMalloc(1 * sizeof(float));; x6381[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6379,x1057,1,x6381, x1361, 1, x1057,1)); arrayFill<<<28, 512>>>(x1361, 0.0f, 256); float* x6385 = (float*)myMalloc(1 * sizeof(float));; x6385[0] = 1.0f; float* x6387 = (float*)myMalloc(1 * sizeof(float));; x6387[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,512,x6385,x1060,1,x6387, x1362, 1, x1060,1)); arrayFill<<<28, 512>>>(x1362, 0.0f, 512); float* x6391 = (float*)myMalloc(1 * sizeof(float));; x6391[0] = 1.0f; float* x6393 = (float*)myMalloc(1 * sizeof(float));; x6393[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 512,128,x6391,x1063,512,x6393, x1363, 512, x1063,512)); arrayFill<<<28, 512>>>(x1363, 0.0f, 65536); float* x6397 = (float*)myMalloc(1 * sizeof(float));; x6397[0] = 1.0f; float* x6399 = (float*)myMalloc(1 * sizeof(float));; x6399[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,64,x6397,x1066,1,x6399, x1364, 1, x1066,1)); arrayFill<<<28, 512>>>(x1364, 0.0f, 64); float* x6403 = (float*)myMalloc(1 * sizeof(float));; x6403[0] = 1.0f; float* x6405 = (float*)myMalloc(1 * sizeof(float));; x6405[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,512,x6403,x1069,256,x6405, x1365, 256, x1069,256)); arrayFill<<<28, 512>>>(x1365, 0.0f, 131072); float* x6409 = (float*)myMalloc(1 * sizeof(float));; x6409[0] = 1.0f; float* x6411 = (float*)myMalloc(1 * sizeof(float));; x6411[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6409,x1072,1,x6411, x1366, 1, x1072,1)); arrayFill<<<28, 512>>>(x1366, 0.0f, 256); float* x6415 = (float*)myMalloc(1 * sizeof(float));; x6415[0] = 1.0f; float* x6417 = (float*)myMalloc(1 * sizeof(float));; x6417[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,2048,x6415,x1075,1,x6417, x1367, 1, x1075,1)); arrayFill<<<28, 512>>>(x1367, 0.0f, 2048); float* x6421 = (float*)myMalloc(1 * sizeof(float));; x6421[0] = 1.0f; float* x6423 = (float*)myMalloc(1 * sizeof(float));; x6423[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6421,x1078,1,x6423, x1368, 1, x1078,1)); arrayFill<<<28, 512>>>(x1368, 0.0f, 128); float* x6427 = (float*)myMalloc(1 * sizeof(float));; x6427[0] = 1.0f; float* x6429 = (float*)myMalloc(1 * sizeof(float));; x6429[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2304,256,x6427,x1081,2304,x6429, x1369, 2304, x1081,2304)); arrayFill<<<28, 512>>>(x1369, 0.0f, 589824); float* x6433 = (float*)myMalloc(1 * sizeof(float));; x6433[0] = 1.0f; float* x6435 = (float*)myMalloc(1 * sizeof(float));; x6435[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6433,x1084,1,x6435, x1370, 1, x1084,1)); arrayFill<<<28, 512>>>(x1370, 0.0f, 1024); float* x6439 = (float*)myMalloc(1 * sizeof(float));; 
x6439[0] = 1.0f; float* x6441 = (float*)myMalloc(1 * sizeof(float));; x6441[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6439,x1087,1,x6441, x1371, 1, x1087,1)); arrayFill<<<28, 512>>>(x1371, 0.0f, 256); float* x6445 = (float*)myMalloc(1 * sizeof(float));; x6445[0] = 1.0f; float* x6447 = (float*)myMalloc(1 * sizeof(float));; x6447[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 2048,512,x6445,x1090,2048,x6447, x1372, 2048, x1090,2048)); arrayFill<<<28, 512>>>(x1372, 0.0f, 1048576); float* x6451 = (float*)myMalloc(1 * sizeof(float));; x6451[0] = 1.0f; float* x6453 = (float*)myMalloc(1 * sizeof(float));; x6453[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6451,x1093,1,x6453, x1373, 1, x1093,1)); arrayFill<<<28, 512>>>(x1373, 0.0f, 128); float* x6457 = (float*)myMalloc(1 * sizeof(float));; x6457[0] = 1.0f; float* x6459 = (float*)myMalloc(1 * sizeof(float));; x6459[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6457,x1096,1,x6459, x1374, 1, x1096,1)); arrayFill<<<28, 512>>>(x1374, 0.0f, 1024); float* x6463 = (float*)myMalloc(1 * sizeof(float));; x6463[0] = 1.0f; float* x6465 = (float*)myMalloc(1 * sizeof(float));; x6465[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,128,x6463,x1099,1,x6465, x1375, 1, x1099,1)); arrayFill<<<28, 512>>>(x1375, 0.0f, 128); float* x6469 = (float*)myMalloc(1 * sizeof(float));; x6469[0] = 1.0f; float* x6471 = (float*)myMalloc(1 * sizeof(float));; x6471[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 256,1024,x6469,x1102,256,x6471, x1376, 256, x1102,256)); arrayFill<<<28, 512>>>(x1376, 0.0f, 262144); float* x6475 = (float*)myMalloc(1 * sizeof(float));; x6475[0] = 1.0f; float* x6477 = (float*)myMalloc(1 * sizeof(float));; x6477[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6475,x1105,1,x6477, x1377, 1, x1105,1)); arrayFill<<<28, 512>>>(x1377, 0.0f, 256); float* x6481 = (float*)myMalloc(1 * sizeof(float));; x6481[0] = 1.0f; float* x6483 = (float*)myMalloc(1 * sizeof(float));; x6483[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,256,x6481,x1108,1,x6483, x1378, 1, x1108,1)); arrayFill<<<28, 512>>>(x1378, 0.0f, 256); float* x6487 = (float*)myMalloc(1 * sizeof(float));; x6487[0] = 1.0f; float* x6489 = (float*)myMalloc(1 * sizeof(float));; x6489[0] = -0.005f; CUBLAS_CALL(cublasSgeam(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 1,1024,x6487,x1111,1,x6489, x1379, 1, x1111,1)); arrayFill<<<28, 512>>>(x1379, 0.0f, 1024); int32_t x6493 = x1396 + 1; int32_t x6495 = x6493 % x6494; bool x6496 = x6495 == 0; if (x6496) { float x6501 = x1390; double x6497 = (double)x1397; double x6498 = 100.0 * x6497; double x6500 = x6498 / x6499; float x6502 = (float)x1396; float x6503 = x6501 / x6502; printf("Train epoch %d: [%d/%d (%.0f%%)] Average Loss: %.6f\n",x1386,x1397,x11,x6500,x6503); fflush(stdout); } else { } int64_t x6508 = (long)mallocAddr; int64_t x6509 = x6508 - x1382; memset((void*)x1382, 0, x6509); mallocAddr = (void*)x1382; int64_t x6512 = (long)gpuMallocAddr; int64_t x6513 = x6512 - x1383; cudaMemset((void*)x1383, 0, x6513); gpuMallocAddr = (void*)x1383; } gettimeofday(&end_1, NULL); timeval_subtract(&diff_1, &end_1, &begin_1);; int64_t x6520 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec)); double x6521 = (double)x6520; double x6522 = x6521 / 1000000.0; x1381[x1386] = x6522; int64_t x6524 = x6520 / 1000LL; 
int64_t x6526 = x6520 / x6525; printf("Training completed in %ldms (%ld us/images)\n",x6524,x6526); float x6528 = x1390; float x6530 = x6528 / x6529; double x6531 = (double)x6530; x1380[x1386] = x6531; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x6537 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); sort(x1381, x1381 + 4); double x6543 = x1381[2]; int64_t x6544 = (long)fopen(x0, "w"); fprintf((FILE *)x6544, "unit: %s\n", "1 epoch"); for(int x6546=0; x6546 < 4; x6546++) { double x6547 = x1380[x6546]; fprintf((FILE *)x6544, "%lf\n", x6547); } fprintf((FILE *)x6544, "run time: %lf %lf\n", x39, x6543); fclose((FILE*)x6544); // Backend cleanup. CUBLAS_CALL(cublasDestroy(cublasHandle)); CUDA_CALL(cudaFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
610497a51f27f2f374154365d6026c87176e7dd8.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <array/ExtraArguments.h> #include <array> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> using namespace sd; class LambdaTests : public testing::Test { public: LambdaTests() { printf("\n"); fflush(stdout); } }; template <typename Lambda> __global__ void runLambda(double *input, double *output, Nd4jLong length, Lambda lambda) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < length; e += gridDim.x * blockDim.x) { output[e] = lambda(input[e]); } } void launcher(hipStream_t *stream, double *input, double *output, Nd4jLong length) { //auto f = [] __host__ __device__ (double x) -> double { // return x + 1.; //}; auto f = LAMBDA_D(x) { return x+1.; }; hipLaunchKernelGGL(( runLambda), dim3(128), dim3(128), 128, *stream, input, output, length, f); } TEST_F(LambdaTests, test_basic_1) { auto x = NDArrayFactory::create<double>('c', {5}); auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); //x.applyLambda<double>(f, nullptr); launcher(LaunchContext::defaultContext()->getCudaStream(), (double *)x.specialBuffer(), (double *)x.specialBuffer(), x.lengthOf()); auto res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream()); ASSERT_EQ(0, res); ASSERT_EQ(e, x); } void test(NDArray &x) { auto f = LAMBDA_D(x) { return x+1.; }; x.applyLambda(f, x); } template <typename T> void test2(NDArray &x) { auto f = LAMBDA_T(x) { return x+1.; }; x.applyLambda(f, x); } void testPairwise(NDArray &x, NDArray &y) { auto f = LAMBDA_DD(x, y) { return x + y +1.; }; x.applyPairwiseLambda(y, f, x); } void testTriplewise(NDArray &i, NDArray &j, NDArray &k) { auto f = LAMBDA_DDD(i, j, k) { return i + j + k + 2.; }; i.applyTriplewiseLambda(j, k, f, i); } void testIndexed(NDArray &x) { auto f = ILAMBDA_D(x) { return _idx + 1.; }; x.applyIndexedLambda(f, x); } void testIndexedPairwise(NDArray &x, NDArray &y) { auto f = ILAMBDA_DD(x, y) { return _idx + x + y +1.; }; x.applyIndexedPairwiseLambda(y, f, x); } TEST_F(LambdaTests, test_basic_2) { auto x = NDArrayFactory::create<double>('c', {5}); auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); test(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_3) { auto x = NDArrayFactory::create<float>('c', {5}); auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f}); test(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_4) { auto x = NDArrayFactory::create<float>('c', {5}); auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f}); test2<float>(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_5) { auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); auto y = 
NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.}); auto e = NDArrayFactory::create<double>('c', {5}, {4., 4., 4., 4., 4.}); testPairwise(x, y); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_6) { auto x = NDArrayFactory::create<double>('c', {5}); auto e = NDArrayFactory::create<double>('c', {5}, {1., 2., 3., 4., 5.}); testIndexed(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_7) { auto w = NDArrayFactory::create<double>('c', {5}, {0., 0., 0., 0., 0.}); auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.}); auto e = NDArrayFactory::create<double>('c', {5}, {5., 5., 5., 5., 5.}); testTriplewise(w, x, y); ASSERT_EQ(e, w); } TEST_F(LambdaTests, test_basic_8) { auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.}); auto e = NDArrayFactory::create<double>('c', {5}, {4., 5., 6., 7., 8.}); testIndexedPairwise(x, y); ASSERT_EQ(e, x); } template <typename T> void testPairwiseMy(NDArray &x, NDArray &y, NDArray &z) { auto f = LAMBDA_TT(x, y){ return sd::math::nd4j_max<T>(x, (T)0.f) - x * y + sd::math::nd4j_log<T,T>((T)1.f + sd::math::nd4j_exp<T,T>(-sd::math::nd4j_abs(x))); }; x.applyPairwiseLambda(y, f, z); } /////////////////////////////////////////////////////////////////// TEST_F(LambdaTests, test_basic_9) { NDArray labels('c', {2,3,4},{0,1,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,1,1,0,1,0}); NDArray logits('c', {2,3,4}, sd::DataType::DOUBLE); NDArray output('c', {2,3,4}, sd::DataType::DOUBLE); NDArray expected('c', {2,3,4}, {0.744397, 0.598139, 0.554355, 0.913015, 0.474077, 1.037488, 0.403186, 1.171101, 0.341154, 1.313262, 0.287335, 1.463282, 0.241008, 1.620417, 0.201413, 1.783901, 0.167786, 1.952978, 2.039387, 0.126928, 0.115520, 2.305083, 0.095545, 2.486836}); logits.linspace(0.1, 0.1); NDArray::prepareSpecialUse({&output}, {&logits, &labels}); testPairwiseMy<double>(logits, labels, output); NDArray::registerSpecialUse({&output}, {&logits, &labels}); // output.printBuffer(nullptr, -1, true); ASSERT_TRUE(expected.equalsTo(output)); }
610497a51f27f2f374154365d6026c87176e7dd8.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <array/ExtraArguments.h> #include <array> #include <cuda.h> #include <cuda_runtime.h> using namespace sd; class LambdaTests : public testing::Test { public: LambdaTests() { printf("\n"); fflush(stdout); } }; template <typename Lambda> __global__ void runLambda(double *input, double *output, Nd4jLong length, Lambda lambda) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong e = tid; e < length; e += gridDim.x * blockDim.x) { output[e] = lambda(input[e]); } } void launcher(cudaStream_t *stream, double *input, double *output, Nd4jLong length) { //auto f = [] __host__ __device__ (double x) -> double { // return x + 1.; //}; auto f = LAMBDA_D(x) { return x+1.; }; runLambda<<<128, 128, 128, *stream>>>(input, output, length, f); } TEST_F(LambdaTests, test_basic_1) { auto x = NDArrayFactory::create<double>('c', {5}); auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); //x.applyLambda<double>(f, nullptr); launcher(LaunchContext::defaultContext()->getCudaStream(), (double *)x.specialBuffer(), (double *)x.specialBuffer(), x.lengthOf()); auto res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream()); ASSERT_EQ(0, res); ASSERT_EQ(e, x); } void test(NDArray &x) { auto f = LAMBDA_D(x) { return x+1.; }; x.applyLambda(f, x); } template <typename T> void test2(NDArray &x) { auto f = LAMBDA_T(x) { return x+1.; }; x.applyLambda(f, x); } void testPairwise(NDArray &x, NDArray &y) { auto f = LAMBDA_DD(x, y) { return x + y +1.; }; x.applyPairwiseLambda(y, f, x); } void testTriplewise(NDArray &i, NDArray &j, NDArray &k) { auto f = LAMBDA_DDD(i, j, k) { return i + j + k + 2.; }; i.applyTriplewiseLambda(j, k, f, i); } void testIndexed(NDArray &x) { auto f = ILAMBDA_D(x) { return _idx + 1.; }; x.applyIndexedLambda(f, x); } void testIndexedPairwise(NDArray &x, NDArray &y) { auto f = ILAMBDA_DD(x, y) { return _idx + x + y +1.; }; x.applyIndexedPairwiseLambda(y, f, x); } TEST_F(LambdaTests, test_basic_2) { auto x = NDArrayFactory::create<double>('c', {5}); auto e = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); test(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_3) { auto x = NDArrayFactory::create<float>('c', {5}); auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f}); test(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_4) { auto x = NDArrayFactory::create<float>('c', {5}); auto e = NDArrayFactory::create<float>('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f}); test2<float>(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_5) { auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.}); auto e = NDArrayFactory::create<double>('c', {5}, 
{4., 4., 4., 4., 4.}); testPairwise(x, y); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_6) { auto x = NDArrayFactory::create<double>('c', {5}); auto e = NDArrayFactory::create<double>('c', {5}, {1., 2., 3., 4., 5.}); testIndexed(x); ASSERT_EQ(e, x); } TEST_F(LambdaTests, test_basic_7) { auto w = NDArrayFactory::create<double>('c', {5}, {0., 0., 0., 0., 0.}); auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.}); auto e = NDArrayFactory::create<double>('c', {5}, {5., 5., 5., 5., 5.}); testTriplewise(w, x, y); ASSERT_EQ(e, w); } TEST_F(LambdaTests, test_basic_8) { auto x = NDArrayFactory::create<double>('c', {5}, {1., 1., 1., 1., 1.}); auto y = NDArrayFactory::create<double>('c', {5}, {2., 2., 2., 2., 2.}); auto e = NDArrayFactory::create<double>('c', {5}, {4., 5., 6., 7., 8.}); testIndexedPairwise(x, y); ASSERT_EQ(e, x); } template <typename T> void testPairwiseMy(NDArray &x, NDArray &y, NDArray &z) { auto f = LAMBDA_TT(x, y){ return sd::math::nd4j_max<T>(x, (T)0.f) - x * y + sd::math::nd4j_log<T,T>((T)1.f + sd::math::nd4j_exp<T,T>(-sd::math::nd4j_abs(x))); }; x.applyPairwiseLambda(y, f, z); } /////////////////////////////////////////////////////////////////// TEST_F(LambdaTests, test_basic_9) { NDArray labels('c', {2,3,4},{0,1,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,1,1,0,1,0}); NDArray logits('c', {2,3,4}, sd::DataType::DOUBLE); NDArray output('c', {2,3,4}, sd::DataType::DOUBLE); NDArray expected('c', {2,3,4}, {0.744397, 0.598139, 0.554355, 0.913015, 0.474077, 1.037488, 0.403186, 1.171101, 0.341154, 1.313262, 0.287335, 1.463282, 0.241008, 1.620417, 0.201413, 1.783901, 0.167786, 1.952978, 2.039387, 0.126928, 0.115520, 2.305083, 0.095545, 2.486836}); logits.linspace(0.1, 0.1); NDArray::prepareSpecialUse({&output}, {&logits, &labels}); testPairwiseMy<double>(logits, labels, output); NDArray::registerSpecialUse({&output}, {&logits, &labels}); // output.printBuffer(nullptr, -1, true); ASSERT_TRUE(expected.equalsTo(output)); }
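/*
 * A minimal sketch of the launch-syntax mapping that the LambdaTests pair above
 * illustrates: hipify rewrites a CUDA triple-chevron launch carrying an explicit
 * dynamic shared-memory size and stream into hipLaunchKernelGGL, which takes the
 * kernel first, then the same grid, block, shared-memory and stream configuration,
 * then the kernel arguments. The kernel and variable names below (scaleKernel,
 * buffer, length) are illustrative only and do not come from the files above.
 */
#include <hip/hip_runtime.h>

__global__ void scaleKernel(double *data, size_t length, double factor) {
    size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (size_t e = tid; e < length; e += (size_t)gridDim.x * blockDim.x)
        data[e] *= factor;   // simple grid-stride update, mirroring runLambda above
}

// CUDA form (as in the .cu file):
//   scaleKernel<<<128, 128, 0, *stream>>>(buffer, length, 2.0);
// HIP form produced by hipify (as in the .hip file):
//   hipLaunchKernelGGL((scaleKernel), dim3(128), dim3(128), 0, *stream, buffer, length, 2.0);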
c5f3894aca13b9f908273323b5e452dbebfb6d5b.hip
// !!! This is a file automatically generated by hipify!!! // Homework_5 // Problem_4 // change the array size to 8000. Check if answer to problem 3 still works. // RUN as // nvcc prob4.cu // ./a.out #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> //Kernel function to initialize array __global__ void initialize(int *arr, int size){ int sectors = blockIdx.x * blockDim.x + threadIdx.x; int increment = gridDim.x * blockDim.x; for (int i = sectors; i < size; i += increment){ arr[i] = 0; } } //add kernel function to add i to a[i] __global__ void addIValue(int *arr, int size){ int sectors = blockIdx.x * blockDim.x + threadIdx.x; int increment = gridDim.x * blockDim.x; for (int i = sectors; i < size; i+= increment){ arr[i] += i; } } //loop void print(int *ar, int size){ printf("\n"); for (int i = 0; i < size; i++){ printf("%d ", ar[i]); } printf("\n"); } // it prints out message of running int main(void){ printf("Homework#5\nProblem 4:Change the array size to 8000. Check if answer to problem 3 still works\n---Successfully initiated---\n---Check the code---"); //here declare int array int size = 8000; int *array; int GPU = 32; int arraySize = size * sizeof(int); hipMallocManaged(&array, arraySize); int blocks = (size + GPU - 1) / GPU;hipLaunchKernelGGL(( initialize), dim3(blocks), dim3(GPU), 0, 0, array, size); //here add value of i to array hipLaunchKernelGGL(( addIValue), dim3(blocks), dim3(GPU), 0, 0, array, size); hipDeviceSynchronize(); print(array, size); hipFree(array); hipDeviceReset(); return 0; }
c5f3894aca13b9f908273323b5e452dbebfb6d5b.cu
// Homework_5 // Problem_4 // change the array size to 8000. Check if answer to problem 3 still works. // RUN as // nvcc prob4.cu // ./a.out #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> //Kernel function to initialize array __global__ void initialize(int *arr, int size){ int sectors = blockIdx.x * blockDim.x + threadIdx.x; int increment = gridDim.x * blockDim.x; for (int i = sectors; i < size; i += increment){ arr[i] = 0; } } //add kernel function to add i to a[i] __global__ void addIValue(int *arr, int size){ int sectors = blockIdx.x * blockDim.x + threadIdx.x; int increment = gridDim.x * blockDim.x; for (int i = sectors; i < size; i+= increment){ arr[i] += i; } } //loop void print(int *ar, int size){ printf("\n"); for (int i = 0; i < size; i++){ printf("%d ", ar[i]); } printf("\n"); } // it prints out message of running int main(void){ printf("Homework#5\nProblem 4:Change the array size to 8000. Check if answer to problem 3 still works\n---Successfully initiated---\n---Check the code---"); //here declare int array int size = 8000; int *array; int GPU = 32; int arraySize = size * sizeof(int); cudaMallocManaged(&array, arraySize); int blocks = (size + GPU - 1) / GPU; initialize<<<blocks, GPU>>>(array, size); //here add value of i to array addIValue<<<blocks, GPU>>>(array, size); cudaDeviceSynchronize(); print(array, size); cudaFree(array); cudaDeviceReset(); return 0; }
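/*
 * A hedged sketch of error checking that the homework pair above leaves out: both the
 * managed allocation and the kernel launch report failures through error codes that
 * are otherwise ignored. The CHECK_HIP macro is an assumption introduced here for
 * illustration; the runtime calls themselves (hipMallocManaged, hipGetLastError,
 * hipGetErrorString, hipDeviceSynchronize, hipFree) are standard HIP API, and the
 * CUDA variant is identical with the cuda* spellings.
 */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_HIP(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error %s at %s:%d\n",                      \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Same grid-stride initialization pattern as the kernel in the files above.
__global__ void initialize(int *arr, int size) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
         i += gridDim.x * blockDim.x)
        arr[i] = 0;
}

int main(void) {
    const int size = 8000, threads = 32;
    int *array = NULL;
    CHECK_HIP(hipMallocManaged(&array, size * sizeof(int)));
    initialize<<<(size + threads - 1) / threads, threads>>>(array, size);
    CHECK_HIP(hipGetLastError());      // reports launch-configuration errors
    CHECK_HIP(hipDeviceSynchronize()); // reports errors raised during kernel execution
    CHECK_HIP(hipFree(array));
    return 0;
}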
cb893881826f41cc5b9875c7a7df722f42b4314a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <cstdio>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void LabelRemapForward(const int n, const int* label_map_data,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = static_cast<Dtype>(
        label_map_data[static_cast<int>(in[index])]);
    //printf("******%d %d\n", static_cast<int>(in[index]),
    //    label_map_data[static_cast<int>(in[index])]);
  }
}

template <typename Dtype>
void LabelRemapLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  const int* label_map_data = (int*) label_map->gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LabelRemapForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, label_map_data, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_LAYER_GPU_FORWARD(LabelRemapLayer);

}  // namespace caffe
cb893881826f41cc5b9875c7a7df722f42b4314a.cu
#include <algorithm>
#include <vector>
#include <cstdio>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void LabelRemapForward(const int n, const int* label_map_data,
    const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = static_cast<Dtype>(
        label_map_data[static_cast<int>(in[index])]);
    //printf("******%d %d\n", static_cast<int>(in[index]),
    //    label_map_data[static_cast<int>(in[index])]);
  }
}

template <typename Dtype>
void LabelRemapLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  const int* label_map_data = (int*) label_map->gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  LabelRemapForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, label_map_data, bottom_data, top_data);
  CUDA_POST_KERNEL_CHECK;
}

INSTANTIATE_LAYER_GPU_FORWARD(LabelRemapLayer);

}  // namespace caffe
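/*
 * For readers unfamiliar with the Caffe helpers used in the LabelRemap pair above:
 * CUDA_KERNEL_LOOP is a grid-stride loop macro and CAFFE_GET_BLOCKS rounds the element
 * count up to whole blocks of CAFFE_CUDA_NUM_THREADS threads. The definitions below are
 * a sketch of how Caffe's headers declare them (the exact thread count and spelling can
 * differ between Caffe versions); they are reproduced only to make the kernel above
 * self-explanatory and are not part of the file pair.
 */
const int CAFFE_CUDA_NUM_THREADS = 512;

inline int CAFFE_GET_BLOCKS(const int N) {
  return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)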
d4fc3b92043e0618dcdb82859567d9b260d64f7c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> __device__ char key[] = "$1&1234-1234-123456"; __device__ int f(int n, int byte, int c) { for (int bitIndex = 0; bitIndex <= 7; bitIndex++) { int bit = (byte >> bitIndex) & 1; if (bit + ((n - bit) & ~1) == n) { n = (n - bit) >> 1; } else { n = ((c - bit) ^ n) >> 1; } } return n; } __global__ void keygen(char* mathId, int hash_base, char* res) { res += 16*(blockIdx.x*blockDim.x+threadIdx.x); int hash = hash_base + blockIdx.x*blockDim.x + threadIdx.x; for(int byteIndex = 18; byteIndex >= 0; byteIndex--){ hash = f(hash, (int)key[byteIndex], 0x105C3); } for(int byteIndex = 15; byteIndex >= 0; byteIndex--){ hash = f(hash, (int)mathId[byteIndex], 0x105C3); } int n1 = 0; while (f(f(hash, n1 & 0xFF, 0x105C3), n1 >> 8, 0x105C3) != 0xA5B6) { ++n1; } n1 = floor(((n1 + 0x72FA) & 0xFFFF) * 99999.0 / 0xFFFF); int temp = n1/1000*1000 + n1%100*10 + n1%1000/100; temp = ceil((temp/99999.0)*0xFFFF); temp = f(f(0, temp & 0xFF, 0x1064B), temp >> 8, 0x1064B); for(int byteIndex = 18; byteIndex >= 0; byteIndex--){ temp = f(temp, (int)key[byteIndex], 0x1064B); } for(int byteIndex = 15; byteIndex >= 0; byteIndex--){ temp = f(temp, (int)mathId[byteIndex], 0x1064B); } int n2 = 0; while (f(f(temp, n2 & 0xFF, 0x1064B), n2 >> 8, 0x1064B) != 0xA5B6) { ++n2; } n2 = floor((n2 & 0xFFFF) * 99999.0 / 0xFFFF); res[10] = n1 % 10 + 48; res[1] = (n1/=10) % 10 + 48; res[6] = (n1/=10) % 10 + 48; res[2] = (n1/=10) % 10 + 48; res[3] = (n1/=10) % 10 + 48; res[5] = n2 % 10 + 48; res[0] = (n2/=10) % 10 + 48; res[9] = (n2/=10) % 10 + 48; res[11] =(n2/=10) % 10 + 48; res[7] = (n2/=10) % 10 + 48; res[4] = '-'; res[8] = '-'; res[12] = ':'; res[13] = ':'; res[14] = '1'; res[15] = 0; } #if !defined Thread_Num #define Thread_Num 1024 #endif int main(int argc, char** argv){ int hashStart = 0x0; int hashEnd = 0x10000; if(argc==4){ sscanf(argv[2],"%x",&hashStart); sscanf(argv[3],"%x",&hashEnd); } if(argc==3){ sscanf(argv[2],"%x",&hashStart); hashEnd = hashStart + 1; } int Total_Number = hashEnd - hashStart; int Block_Num = (Total_Number + Thread_Num - 1)/ Thread_Num; char* math_id = NULL; size_t math_id_size = strlen(argv[1])*sizeof(char); hipMalloc((void**)&math_id, math_id_size); hipMemcpy(math_id, argv[1], math_id_size, hipMemcpyHostToDevice); char h_res[16*Block_Num*Thread_Num]; char* d_res = NULL; size_t res_size = 16*Block_Num*Thread_Num*sizeof(char); hipMalloc((void**)&d_res, res_size); printf("Hash MathId Key Password\n"); hipLaunchKernelGGL(( keygen), dim3(Block_Num),dim3(Thread_Num), 0, 0, math_id,hashStart,d_res); hipMemcpy(h_res, d_res, 16*Total_Number*sizeof(char), hipMemcpyDeviceToHost); for(int hash_del = 0; hash_del<Total_Number; hash_del++){ printf("%04X %s 1234-1234-123456 %s\n", hashStart+hash_del, argv[1], h_res+16*hash_del); } return 0; }
d4fc3b92043e0618dcdb82859567d9b260d64f7c.cu
#include <stdio.h> #include <string.h> #include <math.h> #include <cuda_runtime.h> __device__ char key[] = "$1&1234-1234-123456"; __device__ int f(int n, int byte, int c) { for (int bitIndex = 0; bitIndex <= 7; bitIndex++) { int bit = (byte >> bitIndex) & 1; if (bit + ((n - bit) & ~1) == n) { n = (n - bit) >> 1; } else { n = ((c - bit) ^ n) >> 1; } } return n; } __global__ void keygen(char* mathId, int hash_base, char* res) { res += 16*(blockIdx.x*blockDim.x+threadIdx.x); int hash = hash_base + blockIdx.x*blockDim.x + threadIdx.x; for(int byteIndex = 18; byteIndex >= 0; byteIndex--){ hash = f(hash, (int)key[byteIndex], 0x105C3); } for(int byteIndex = 15; byteIndex >= 0; byteIndex--){ hash = f(hash, (int)mathId[byteIndex], 0x105C3); } int n1 = 0; while (f(f(hash, n1 & 0xFF, 0x105C3), n1 >> 8, 0x105C3) != 0xA5B6) { ++n1; } n1 = floor(((n1 + 0x72FA) & 0xFFFF) * 99999.0 / 0xFFFF); int temp = n1/1000*1000 + n1%100*10 + n1%1000/100; temp = ceil((temp/99999.0)*0xFFFF); temp = f(f(0, temp & 0xFF, 0x1064B), temp >> 8, 0x1064B); for(int byteIndex = 18; byteIndex >= 0; byteIndex--){ temp = f(temp, (int)key[byteIndex], 0x1064B); } for(int byteIndex = 15; byteIndex >= 0; byteIndex--){ temp = f(temp, (int)mathId[byteIndex], 0x1064B); } int n2 = 0; while (f(f(temp, n2 & 0xFF, 0x1064B), n2 >> 8, 0x1064B) != 0xA5B6) { ++n2; } n2 = floor((n2 & 0xFFFF) * 99999.0 / 0xFFFF); res[10] = n1 % 10 + 48; res[1] = (n1/=10) % 10 + 48; res[6] = (n1/=10) % 10 + 48; res[2] = (n1/=10) % 10 + 48; res[3] = (n1/=10) % 10 + 48; res[5] = n2 % 10 + 48; res[0] = (n2/=10) % 10 + 48; res[9] = (n2/=10) % 10 + 48; res[11] =(n2/=10) % 10 + 48; res[7] = (n2/=10) % 10 + 48; res[4] = '-'; res[8] = '-'; res[12] = ':'; res[13] = ':'; res[14] = '1'; res[15] = 0; } #if !defined Thread_Num #define Thread_Num 1024 #endif int main(int argc, char** argv){ int hashStart = 0x0; int hashEnd = 0x10000; if(argc==4){ sscanf(argv[2],"%x",&hashStart); sscanf(argv[3],"%x",&hashEnd); } if(argc==3){ sscanf(argv[2],"%x",&hashStart); hashEnd = hashStart + 1; } int Total_Number = hashEnd - hashStart; int Block_Num = (Total_Number + Thread_Num - 1)/ Thread_Num; char* math_id = NULL; size_t math_id_size = strlen(argv[1])*sizeof(char); cudaMalloc((void**)&math_id, math_id_size); cudaMemcpy(math_id, argv[1], math_id_size, cudaMemcpyHostToDevice); char h_res[16*Block_Num*Thread_Num]; char* d_res = NULL; size_t res_size = 16*Block_Num*Thread_Num*sizeof(char); cudaMalloc((void**)&d_res, res_size); printf("Hash MathId Key Password\n"); keygen<<<Block_Num,Thread_Num>>>(math_id,hashStart,d_res); cudaMemcpy(h_res, d_res, 16*Total_Number*sizeof(char), cudaMemcpyDeviceToHost); for(int hash_del = 0; hash_del<Total_Number; hash_del++){ printf("%04X %s 1234-1234-123456 %s\n", hashStart+hash_del, argv[1], h_res+16*hash_del); } return 0; }
2f0d4b56c6c91c0bd5b66fd09acc1d66c7d8aed7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <time.h>

using namespace std;

__global__ void multSquareMatrix(int *A, int *B, int *result, int n)
{
    int k, sum = 0;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    for (k = 0; k < n; k++) {
        sum += A[row * n + k] * B[k * n + col];
    }
    // write the accumulated dot product once, after the loop
    result[row * n + col] = sum;
}

#define N 32

void initMat(int* mat)
{
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            mat[i * N + j] = 1 + i + j;
            // printf("%d \t", mat[i * N + j]);
        }
        //printf("\n");
    }
}

int main()
{
    hipDeviceProp_t props;
    hipGetDeviceProperties(&props, 0);

    int xBlock = props.maxThreadsDim[0];
    int yBlock = props.maxThreadsDim[1];
    if (xBlock > N) {
        xBlock = N;
    }
    if (yBlock > N) {
        yBlock = N;
    }

    int xGrid = props.maxGridSize[0];
    int yGrid = props.maxGridSize[1];
    if (xGrid > ceil(1.0 * N / xBlock)) {
        xGrid = ceil(1.0 * N / xBlock);
    }
    if (yGrid > ceil(1.0 * N / yBlock)) {
        yGrid = ceil(1.0 * N / yBlock);
    }

    dim3 dimBlock(xBlock, yBlock);
    dim3 dimGrid(xGrid, yGrid);

    //start = clock();
    int *arr1_h = (int*)malloc(sizeof(int) * N * N);
    int *arr2_h = (int*)malloc(sizeof(int) * N * N);
    initMat(arr1_h);
    initMat(arr2_h);

    int *arr1_d, *arr2_d, *result_d;
    hipMalloc(&arr1_d, sizeof(int) * N * N);
    hipMalloc(&arr2_d, sizeof(int) * N * N);
    hipMalloc(&result_d, sizeof(int) * N * N);

    hipMemcpy(arr1_d, arr1_h, sizeof(int) * N * N, hipMemcpyHostToDevice);
    hipMemcpy(arr2_d, arr2_h, sizeof(int) * N * N, hipMemcpyHostToDevice);

    // launch configuration: grid dimensions first, then block dimensions
    multSquareMatrix<<<dimGrid, dimBlock>>>(arr1_d, arr2_d, result_d, N);

    int *result_h = (int*)malloc(sizeof(int) * N * N);
    hipMemcpy(result_h, result_d, sizeof(int) * N * N, hipMemcpyDeviceToHost);

    hipFree(result_d);
    hipFree(arr1_d);
    hipFree(arr2_d);
    free(arr1_h);
    free(arr2_h);
    free(result_h);

    return 0;
}
2f0d4b56c6c91c0bd5b66fd09acc1d66c7d8aed7.cu
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <time.h>

using namespace std;

__global__ void multSquareMatrix(int *A, int *B, int *result, int n)
{
    int k, sum = 0;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    for (k = 0; k < n; k++) {
        sum += A[row * n + k] * B[k * n + col];
    }
    // write the accumulated dot product once, after the loop
    result[row * n + col] = sum;
}

#define N 32

void initMat(int* mat)
{
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            mat[i * N + j] = 1 + i + j;
            // printf("%d \t", mat[i * N + j]);
        }
        //printf("\n");
    }
}

int main()
{
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);

    int xBlock = props.maxThreadsDim[0];
    int yBlock = props.maxThreadsDim[1];
    if (xBlock > N) {
        xBlock = N;
    }
    if (yBlock > N) {
        yBlock = N;
    }

    int xGrid = props.maxGridSize[0];
    int yGrid = props.maxGridSize[1];
    if (xGrid > ceil(1.0 * N / xBlock)) {
        xGrid = ceil(1.0 * N / xBlock);
    }
    if (yGrid > ceil(1.0 * N / yBlock)) {
        yGrid = ceil(1.0 * N / yBlock);
    }

    dim3 dimBlock(xBlock, yBlock);
    dim3 dimGrid(xGrid, yGrid);

    //start = clock();
    int *arr1_h = (int*)malloc(sizeof(int) * N * N);
    int *arr2_h = (int*)malloc(sizeof(int) * N * N);
    initMat(arr1_h);
    initMat(arr2_h);

    int *arr1_d, *arr2_d, *result_d;
    cudaMalloc(&arr1_d, sizeof(int) * N * N);
    cudaMalloc(&arr2_d, sizeof(int) * N * N);
    cudaMalloc(&result_d, sizeof(int) * N * N);

    cudaMemcpy(arr1_d, arr1_h, sizeof(int) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(arr2_d, arr2_h, sizeof(int) * N * N, cudaMemcpyHostToDevice);

    // launch configuration: grid dimensions first, then block dimensions
    multSquareMatrix<<<dimGrid, dimBlock>>>(arr1_d, arr2_d, result_d, N);

    int *result_h = (int*)malloc(sizeof(int) * N * N);
    cudaMemcpy(result_h, result_d, sizeof(int) * N * N, cudaMemcpyDeviceToHost);

    cudaFree(result_d);
    cudaFree(arr1_d);
    cudaFree(arr2_d);
    free(arr1_h);
    free(arr2_h);
    free(result_h);

    return 0;
}
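/*
 * The pair above copies the product matrix back to the host but never checks it. A
 * minimal host-side verification sketch is given below; it assumes the same row-major
 * N x N layout and initMat-style inputs as the files above, while the function name
 * verifyResult is introduced here only for illustration.
 */
#include <stdio.h>

#define N 32

// Recompute C = A * B on the CPU and compare element-wise against the GPU result.
int verifyResult(const int *A, const int *B, const int *C_gpu) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            int ref = 0;
            for (int k = 0; k < N; k++)
                ref += A[row * N + k] * B[k * N + col];
            if (ref != C_gpu[row * N + col]) {
                printf("mismatch at (%d,%d): got %d, expected %d\n",
                       row, col, C_gpu[row * N + col], ref);
                return 0;   // first mismatch found
            }
        }
    }
    return 1;   // every element matches the CPU reference
}

// Usage in main, after the device-to-host copy (illustrative):
//   if (verifyResult(arr1_h, arr2_h, result_h)) printf("GPU result verified\n");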
b74a5c96e092e5ba2b78555c528e72e9f7979e24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Thread block size #define BLOCK_SIZE 16 // Tile Width size #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; int tx = threadIdx.x; int ty = threadIdx.y; int m = numARows; int n = numBRows; int k = numBColumns; int numRows = blockIdx.y * blockDim.y + ty; int numColumns = blockIdx.x * blockDim.x + tx; float Cval = 0.0; //Loading A and B elements and doing Boundary Check for(int t = 0; t < (n-1)/TILE_WIDTH + 1; t++) { if((numRows < numARows) && (t*TILE_WIDTH+tx < n)) { ds_A[ty][tx] = A[numRows*n + t*TILE_WIDTH+tx]; } else { ds_A[ty][tx] = 0.0; } if((numColumns < k) && (t*TILE_WIDTH+ty < n)) { ds_B[ty][tx] = B[(t*TILE_WIDTH+ty)*k + numColumns]; } else { ds_B[ty][tx] = 0.0; } __syncthreads(); for(int i = 0; i < TILE_WIDTH; i++) { Cval += ds_A[ty][i] * ds_B[i][tx]; } __syncthreads(); } if(numRows < m && numColumns < k) { C[numRows*k + numColumns] = Cval; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float*)malloc(numCRows*numCColumns*sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns); int Size_A = numARows*numAColumns*sizeof(float); int Size_B = numBRows*numBColumns*sizeof(float); int Size_C = numCRows*numCColumns*sizeof(float); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(hipMalloc((void**)&deviceA, Size_A)); wbCheck(hipMalloc((void**)&deviceB, Size_B)); wbCheck(hipMalloc((void**)&deviceC, Size_C)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, Size_A, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, Size_B, hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimBlock(BLOCK_SIZE, 
BLOCK_SIZE, 1); dim3 dimGrid(((numBColumns-1)/dimBlock.x)+1, ((numARows-1)/dimBlock.y)+1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiplyShared), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, Size_C, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
b74a5c96e092e5ba2b78555c528e72e9f7979e24.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Thread block size #define BLOCK_SIZE 16 // Tile Width size #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH]; int tx = threadIdx.x; int ty = threadIdx.y; int m = numARows; int n = numBRows; int k = numBColumns; int numRows = blockIdx.y * blockDim.y + ty; int numColumns = blockIdx.x * blockDim.x + tx; float Cval = 0.0; //Loading A and B elements and doing Boundary Check for(int t = 0; t < (n-1)/TILE_WIDTH + 1; t++) { if((numRows < numARows) && (t*TILE_WIDTH+tx < n)) { ds_A[ty][tx] = A[numRows*n + t*TILE_WIDTH+tx]; } else { ds_A[ty][tx] = 0.0; } if((numColumns < k) && (t*TILE_WIDTH+ty < n)) { ds_B[ty][tx] = B[(t*TILE_WIDTH+ty)*k + numColumns]; } else { ds_B[ty][tx] = 0.0; } __syncthreads(); for(int i = 0; i < TILE_WIDTH; i++) { Cval += ds_A[ty][i] * ds_B[i][tx]; } __syncthreads(); } if(numRows < m && numColumns < k) { C[numRows*k + numColumns] = Cval; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float*)malloc(numCRows*numCColumns*sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns); int Size_A = numARows*numAColumns*sizeof(float); int Size_B = numBRows*numBColumns*sizeof(float); int Size_C = numCRows*numCColumns*sizeof(float); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(cudaMalloc((void**)&deviceA, Size_A)); wbCheck(cudaMalloc((void**)&deviceB, Size_B)); wbCheck(cudaMalloc((void**)&deviceC, Size_C)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, Size_A, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, Size_B, cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 dimGrid(((numBColumns-1)/dimBlock.x)+1, 
((numARows-1)/dimBlock.y)+1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, Size_C, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
0ff483f9e8bfc808f91e513a0f24edeec84ba042.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include "main.h" __global__ void CUDA_edge_reverse(FLOAT_PRECISION *cuda_buff, FLOAT_PRECISION *cuda_old, int nx, int ny, int max_iter) { int iter; FLOAT_PRECISION cuda_new; /* * Find the thread number of a CUDA core and the corresponding index for a * 2D array */ int tx = threadIdx.x; int ty = threadIdx.y; int colID = blockDim.x * blockIdx.x + tx; int rowID = blockDim.y * blockIdx.y + ty; if ((colID > 0 ) && (colID < nx+1) && (rowID > 0) && (rowID < ny+1)) { /* * Image processing iterations */ for (iter = 0; iter < max_iter; iter++) { cuda_new = 0.25 * ( cuda_old[colID * (ny + 2) + (rowID - 1)] + cuda_old[colID * (ny + 2) + (rowID + 1)] + cuda_old[(colID - 1) * (ny + 2) + rowID] + cuda_old[(colID + 1) * (ny + 2) + rowID] - cuda_buff[(colID - 1) * ny + (rowID - 1)] ); /* * Set the new values to be the old values for the next iteration */ cuda_old[colID * (ny + 2) + rowID] = cuda_new; /* * Sync the threads to make sure that all the cells have updated * correctly for the next iteration */ __syncthreads(); } /* * Once interations are complete, copy the "old" values, i.e. the * reversed pixel values, into the image buffer */ cuda_buff[(colID - 1) * ny + (rowID - 1)] = cuda_old[colID * (ny + 2) + rowID]; } } extern "C" void cuda_wrapper(FLOAT_PRECISION *image_buff, int nx, int ny, int max_iter) { int i, buff_size, image_size; float cuda_runtime; // has to be a float for the CUDA functions :^) hipEvent_t cuda_start, cuda_stop; FLOAT_PRECISION *host_old = NULL, *cuda_old = NULL, *cuda_new = NULL, *cuda_buff = NULL; hipEventCreate(&cuda_start); hipEventCreate(&cuda_stop); /* * Allocate memory on the device and copy the normalised host buff into the * device memory */ image_size = nx * ny * sizeof(FLOAT_PRECISION); buff_size = (nx + 2) * (ny + 2) * sizeof(FLOAT_PRECISION); host_old = (FLOAT_PRECISION *) malloc(buff_size); for (i = 0; i < (nx + 2) * (ny + 2); i++) host_old[i] = 255.0; hipMalloc((void **) &cuda_old, buff_size); hipMalloc((void **) &cuda_new, buff_size); hipMalloc((void **) &cuda_buff, image_size); /* * Copy the image to the cuda image buff */ hipMemcpy(cuda_old, host_old, buff_size, hipMemcpyHostToDevice); hipMemcpy(cuda_buff, image_buff, image_size, hipMemcpyHostToDevice); free(host_old); /* * Begin image processing using CUDA * - (16, 8) is 128 threads per block */ dim3 n_threads(16, 8); dim3 n_blocks((nx + 2)/n_threads.x + 1, (ny + 2)/n_threads.y + 1); hipEventRecord(cuda_start, MASTER_GPU); /* * Call the CUDA kernel to do the image processing. The number of blocks * created exceeds the number of threads required, but guarantees that all * pixels get a CUDA code */ hipLaunchKernelGGL(( CUDA_edge_reverse), dim3(n_blocks), dim3(n_threads), 0, 0, cuda_buff, cuda_old, nx, ny, max_iter); hipEventRecord(cuda_stop, MASTER_GPU); hipEventSynchronize(cuda_stop); hipEventElapsedTime(&cuda_runtime, cuda_start, cuda_stop); /* * Copy the device result into host memory and free the pointer */ hipMemcpy(image_buff, cuda_buff, image_size, hipMemcpyDeviceToHost); hipFree(cuda_buff); printf("\n---------------------------------------\n"); printf("\nKernel runtime: %5.3f ms\n", cuda_runtime); printf("\n---------------------------------------\n"); }
0ff483f9e8bfc808f91e513a0f24edeec84ba042.cu
#include <stdio.h> #include <math.h> #include "main.h" __global__ void CUDA_edge_reverse(FLOAT_PRECISION *cuda_buff, FLOAT_PRECISION *cuda_old, int nx, int ny, int max_iter) { int iter; FLOAT_PRECISION cuda_new; /* * Find the thread number of a CUDA core and the corresponding index for a * 2D array */ int tx = threadIdx.x; int ty = threadIdx.y; int colID = blockDim.x * blockIdx.x + tx; int rowID = blockDim.y * blockIdx.y + ty; if ((colID > 0 ) && (colID < nx+1) && (rowID > 0) && (rowID < ny+1)) { /* * Image processing iterations */ for (iter = 0; iter < max_iter; iter++) { cuda_new = 0.25 * ( cuda_old[colID * (ny + 2) + (rowID - 1)] + cuda_old[colID * (ny + 2) + (rowID + 1)] + cuda_old[(colID - 1) * (ny + 2) + rowID] + cuda_old[(colID + 1) * (ny + 2) + rowID] - cuda_buff[(colID - 1) * ny + (rowID - 1)] ); /* * Set the new values to be the old values for the next iteration */ cuda_old[colID * (ny + 2) + rowID] = cuda_new; /* * Sync the threads to make sure that all the cells have updated * correctly for the next iteration */ __syncthreads(); } /* * Once interations are complete, copy the "old" values, i.e. the * reversed pixel values, into the image buffer */ cuda_buff[(colID - 1) * ny + (rowID - 1)] = cuda_old[colID * (ny + 2) + rowID]; } } extern "C" void cuda_wrapper(FLOAT_PRECISION *image_buff, int nx, int ny, int max_iter) { int i, buff_size, image_size; float cuda_runtime; // has to be a float for the CUDA functions :^) cudaEvent_t cuda_start, cuda_stop; FLOAT_PRECISION *host_old = NULL, *cuda_old = NULL, *cuda_new = NULL, *cuda_buff = NULL; cudaEventCreate(&cuda_start); cudaEventCreate(&cuda_stop); /* * Allocate memory on the device and copy the normalised host buff into the * device memory */ image_size = nx * ny * sizeof(FLOAT_PRECISION); buff_size = (nx + 2) * (ny + 2) * sizeof(FLOAT_PRECISION); host_old = (FLOAT_PRECISION *) malloc(buff_size); for (i = 0; i < (nx + 2) * (ny + 2); i++) host_old[i] = 255.0; cudaMalloc((void **) &cuda_old, buff_size); cudaMalloc((void **) &cuda_new, buff_size); cudaMalloc((void **) &cuda_buff, image_size); /* * Copy the image to the cuda image buff */ cudaMemcpy(cuda_old, host_old, buff_size, cudaMemcpyHostToDevice); cudaMemcpy(cuda_buff, image_buff, image_size, cudaMemcpyHostToDevice); free(host_old); /* * Begin image processing using CUDA * - (16, 8) is 128 threads per block */ dim3 n_threads(16, 8); dim3 n_blocks((nx + 2)/n_threads.x + 1, (ny + 2)/n_threads.y + 1); cudaEventRecord(cuda_start, MASTER_GPU); /* * Call the CUDA kernel to do the image processing. The number of blocks * created exceeds the number of threads required, but guarantees that all * pixels get a CUDA code */ CUDA_edge_reverse<<<n_blocks, n_threads>>>(cuda_buff, cuda_old, nx, ny, max_iter); cudaEventRecord(cuda_stop, MASTER_GPU); cudaEventSynchronize(cuda_stop); cudaEventElapsedTime(&cuda_runtime, cuda_start, cuda_stop); /* * Copy the device result into host memory and free the pointer */ cudaMemcpy(image_buff, cuda_buff, image_size, cudaMemcpyDeviceToHost); cudaFree(cuda_buff); printf("\n---------------------------------------\n"); printf("\nKernel runtime: %5.3f ms\n", cuda_runtime); printf("\n---------------------------------------\n"); }
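This pair mainly exercises the hipify mapping of the CUDA event API (cudaEventCreate, cudaEventRecord, cudaEventSynchronize, cudaEventElapsedTime) onto the corresponding hipEvent* calls. A minimal sketch of the same timing pattern used in cuda_wrapper follows; stream 0 stands in for MASTER_GPU, which is assumed to come from main.h in the original sources:

// Minimal CUDA event-timing sketch mirroring cuda_wrapper() above.
// The default stream (0) is used here as a stand-in for MASTER_GPU.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummyKernel() {}

int main() {
    cudaEvent_t start, stop;
    float elapsed_ms = 0.0f;        // must be float for cudaEventElapsedTime

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);      // record before the kernel launch
    dummyKernel<<<1, 1>>>();
    cudaEventRecord(stop, 0);       // record after the kernel launch
    cudaEventSynchronize(stop);     // wait until the stop event has completed

    cudaEventElapsedTime(&elapsed_ms, start, stop);
    printf("Kernel runtime: %5.3f ms\n", elapsed_ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}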
57e2ba176d1a297c1948ce1442328e3cc1ee2e93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * setParticlesWS.cu * * Created on: 30-10-2015 * Author: Kamil Szewc */ #include "../../sph.h" #include "../../hlp.h" __global__ void setParticlesWSDP(Particle *p, Parameters *par) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; while (tid < par->N_DISPERSED_PHASE_FLUID) { p[tid].di = 4.0; p[tid].d = p[tid].o * p[tid].di; p[tid].m = par->XCV * par->YCV * p[tid].d / (par->NX * par->NY); tid += blockDim.x * gridDim.x; } }
57e2ba176d1a297c1948ce1442328e3cc1ee2e93.cu
/* * setParticlesWS.cu * * Created on: 30-10-2015 * Author: Kamil Szewc */ #include "../../sph.h" #include "../../hlp.h" __global__ void setParticlesWSDP(Particle *p, Parameters *par) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; while (tid < par->N_DISPERSED_PHASE_FLUID) { p[tid].di = 4.0; p[tid].d = p[tid].o * p[tid].di; p[tid].m = par->XCV * par->YCV * p[tid].d / (par->NX * par->NY); tid += blockDim.x * gridDim.x; } }
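setParticlesWSDP above uses a grid-stride loop (tid += blockDim.x * gridDim.x), so any launch configuration covers all N_DISPERSED_PHASE_FLUID particles regardless of grid size. A generic sketch of the same pattern follows; the kernel name, arguments and launch sizes are hypothetical:

// Generic grid-stride loop sketch (same pattern as setParticlesWSDP above);
// scaleArray and the launch configuration are hypothetical.
__global__ void scaleArray(float *data, float factor, unsigned int n) {
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < n) {                      // each thread handles several elements
        data[tid] *= factor;
        tid += blockDim.x * gridDim.x;     // stride by the total thread count
    }
}

// Launch with any reasonable grid; correctness does not depend on it, e.g.:
//   scaleArray<<<128, 256>>>(devPtr, 2.0f, n);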
a55c3a1617264a7f14ad67b3c296bd2df82a9b53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #define BLOCK_SIZE 64 typedef struct { magmaDoubleComplex *A; magmaDoubleComplex *B; int n, ldda, lddb, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_zswapblk_params_t; /******************************************************************************/ __global__ void magmagpu_zswapblkrm( magmagpu_zswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if ( y < params.n ) { magmaDoubleComplex *A = params.A + y - params.ldda; magmaDoubleComplex *B = params.B + y; for( int i = 0; i < params.npivots; i++ ) { A += params.ldda; if ( params.ipiv[i] == -1 ) continue; magmaDoubleComplex tmp1 = *A; magmaDoubleComplex *tmp2 = B + params.ipiv[i]*params.lddb; *A = *tmp2; *tmp2 = tmp1; } } } /******************************************************************************/ __global__ void magmagpu_zswapblkcm( magmagpu_zswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.ldda; unsigned int offset2 = y*params.lddb; if ( y < params.n ) { magmaDoubleComplex *A = params.A + offset1 - 1; magmaDoubleComplex *B = params.B + offset2; for( int i = 0; i < params.npivots; i++ ) { A++; if ( params.ipiv[i] == -1 ) continue; magmaDoubleComplex tmp1 = *A; magmaDoubleComplex *tmp2 = B + params.ipiv[i]; *A = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /***************************************************************************//** Blocked version: swap several pairs of lines. Used in magma_ztstrf() and magma_zssssm(). @ingroup magma_swapblk *******************************************************************************/ extern "C" void magmablas_zswapblk( magma_order_t order, magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( magma_ceildiv( n, blocksize ) ); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_zswapblk_params_t params = { dA+k, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_zswapblkcm), dim3(blocks), dim3(blocksize), 0, queue->cuda_stream() , params ); } } else { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_zswapblk_params_t params = { dA+k*ldda, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } hipLaunchKernelGGL(( magmagpu_zswapblkrm), dim3(blocks), dim3(blocksize), 0, queue->cuda_stream() , params ); } } }
a55c3a1617264a7f14ad67b3c296bd2df82a9b53.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c */ #include "magma_internal.h" #define BLOCK_SIZE 64 typedef struct { magmaDoubleComplex *A; magmaDoubleComplex *B; int n, ldda, lddb, npivots; short ipiv[BLOCK_SIZE]; } magmagpu_zswapblk_params_t; /******************************************************************************/ __global__ void magmagpu_zswapblkrm( magmagpu_zswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; if ( y < params.n ) { magmaDoubleComplex *A = params.A + y - params.ldda; magmaDoubleComplex *B = params.B + y; for( int i = 0; i < params.npivots; i++ ) { A += params.ldda; if ( params.ipiv[i] == -1 ) continue; magmaDoubleComplex tmp1 = *A; magmaDoubleComplex *tmp2 = B + params.ipiv[i]*params.lddb; *A = *tmp2; *tmp2 = tmp1; } } } /******************************************************************************/ __global__ void magmagpu_zswapblkcm( magmagpu_zswapblk_params_t params ) { unsigned int y = threadIdx.x + blockDim.x*blockIdx.x; unsigned int offset1 = y*params.ldda; unsigned int offset2 = y*params.lddb; if ( y < params.n ) { magmaDoubleComplex *A = params.A + offset1 - 1; magmaDoubleComplex *B = params.B + offset2; for( int i = 0; i < params.npivots; i++ ) { A++; if ( params.ipiv[i] == -1 ) continue; magmaDoubleComplex tmp1 = *A; magmaDoubleComplex *tmp2 = B + params.ipiv[i]; *A = *tmp2; *tmp2 = tmp1; } } __syncthreads(); } /***************************************************************************//** Blocked version: swap several pairs of lines. Used in magma_ztstrf() and magma_zssssm(). @ingroup magma_swapblk *******************************************************************************/ extern "C" void magmablas_zswapblk( magma_order_t order, magma_int_t n, magmaDoubleComplex_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_int_t i1, magma_int_t i2, const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset, magma_queue_t queue ) { magma_int_t blocksize = 64; dim3 blocks( magma_ceildiv( n, blocksize ) ); magma_int_t k, im; /* Quick return */ if ( n == 0 ) return; if ( order == MagmaColMajor ) { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_zswapblk_params_t params = { dA+k, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_zswapblkcm<<< blocks, blocksize, 0, queue->cuda_stream() >>>( params ); } } else { for( k=(i1-1); k < i2; k += BLOCK_SIZE ) { magma_int_t sb = min(BLOCK_SIZE, i2-k); magmagpu_zswapblk_params_t params = { dA+k*ldda, dB, int(n), int(ldda), int(lddb), int(sb) }; for( magma_int_t j = 0; j < sb; j++ ) { im = ipiv[(k+j)*inci] - 1; if ( (k+j) == im ) params.ipiv[j] = -1; else params.ipiv[j] = im - offset; } magmagpu_zswapblkrm<<< blocks, blocksize, 0, queue->cuda_stream() >>>( params ); } } }
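The host wrapper magmablas_zswapblk above packs at most BLOCK_SIZE pivots per kernel call into the by-value params struct: ipiv follows the 1-based LAPACK convention with increment inci, and params.ipiv[j] is set to -1 whenever row k+j is already in place. A small hedged sketch of that packing with made-up pivot values:

// Sketch of the pivot packing inside magmablas_zswapblk above, using
// hypothetical values (one chunk starting at k, inci = 1, offset = 0).
#include <cassert>

int main() {
    const int k = 0, offset = 0, inci = 1, sb = 4;
    const int ipiv[4] = { 1, 4, 3, 4 };    // hypothetical 1-based pivots
    short packed[4];
    for (int j = 0; j < sb; ++j) {
        int im = ipiv[(k + j) * inci] - 1; // 0-based target row
        packed[j] = ((k + j) == im) ? -1   // row already in place: no swap
                                    : (short)(im - offset);
    }
    // Rows 0, 2 and 3 are already in place; row 1 swaps with row 3.
    assert(packed[0] == -1 && packed[1] == 3 && packed[2] == -1 && packed[3] == -1);
    return 0;
}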
5540a8d8c8637197b0c1a8c642755fae2d3a5882.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <decoupled_lookback.cuh> #include <gtest/gtest.h> #include <raft/core/interruptible.hpp> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace MLCommon { template <int TPB> __global__ void dlbTestKernel(void* workspace, int len, int* out) { DecoupledLookBack<int> dlb(workspace); int count = threadIdx.x == blockDim.x - 1 ? 1 : 0; auto prefix = dlb(count); if (threadIdx.x == blockDim.x - 1) out[blockIdx.x] = prefix; } void dlbTest(int len, int* out, hipStream_t stream) { constexpr int TPB = 256; int nblks = len; size_t workspaceSize = DecoupledLookBack<int>::computeWorkspaceSize(nblks); rmm::device_uvector<char> workspace(workspaceSize, stream); RAFT_CUDA_TRY(hipMemset(workspace.data(), 0, workspace.size())); hipLaunchKernelGGL(( dlbTestKernel<TPB>), dim3(nblks), dim3(TPB), 0, 0, workspace.data(), len, out); RAFT_CUDA_TRY(hipPeekAtLastError()); } struct DlbInputs { int len; }; ::std::ostream& operator<<(::std::ostream& os, const DlbInputs& dims) { return os; } class DlbTest : public ::testing::TestWithParam<DlbInputs> { protected: DlbTest() : out(0, stream) {} void SetUp() override { RAFT_CUDA_TRY(hipStreamCreate(&stream)); params = ::testing::TestWithParam<DlbInputs>::GetParam(); int len = params.len; out.resize(len, stream); dlbTest(len, out.data(), stream); } protected: hipStream_t stream = 0; DlbInputs params; rmm::device_uvector<int> out; }; template <typename T, typename L> ::testing::AssertionResult devArrMatchCustom(const T* actual, size_t size, L eq_compare, hipStream_t stream = 0) { std::vector<T> act_h(size); raft::update_host<T>(&(act_h[0]), actual, size, stream); raft::interruptible::synchronize(stream); for (size_t i(0); i < size; ++i) { auto act = act_h[i]; auto expected = (T)i; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i; } } return ::testing::AssertionSuccess(); } const std::vector<DlbInputs> inputs = {{4}, {16}, {64}, {256}, {2048}}; TEST_P(DlbTest, Result) { ASSERT_TRUE(devArrMatchCustom(out.data(), params.len, MLCommon::Compare<int>())); } INSTANTIATE_TEST_CASE_P(DlbTests, DlbTest, ::testing::ValuesIn(inputs)); } // end namespace MLCommon
5540a8d8c8637197b0c1a8c642755fae2d3a5882.cu
/* * Copyright (c) 2018-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <decoupled_lookback.cuh> #include <gtest/gtest.h> #include <raft/core/interruptible.hpp> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace MLCommon { template <int TPB> __global__ void dlbTestKernel(void* workspace, int len, int* out) { DecoupledLookBack<int> dlb(workspace); int count = threadIdx.x == blockDim.x - 1 ? 1 : 0; auto prefix = dlb(count); if (threadIdx.x == blockDim.x - 1) out[blockIdx.x] = prefix; } void dlbTest(int len, int* out, cudaStream_t stream) { constexpr int TPB = 256; int nblks = len; size_t workspaceSize = DecoupledLookBack<int>::computeWorkspaceSize(nblks); rmm::device_uvector<char> workspace(workspaceSize, stream); RAFT_CUDA_TRY(cudaMemset(workspace.data(), 0, workspace.size())); dlbTestKernel<TPB><<<nblks, TPB>>>(workspace.data(), len, out); RAFT_CUDA_TRY(cudaPeekAtLastError()); } struct DlbInputs { int len; }; ::std::ostream& operator<<(::std::ostream& os, const DlbInputs& dims) { return os; } class DlbTest : public ::testing::TestWithParam<DlbInputs> { protected: DlbTest() : out(0, stream) {} void SetUp() override { RAFT_CUDA_TRY(cudaStreamCreate(&stream)); params = ::testing::TestWithParam<DlbInputs>::GetParam(); int len = params.len; out.resize(len, stream); dlbTest(len, out.data(), stream); } protected: cudaStream_t stream = 0; DlbInputs params; rmm::device_uvector<int> out; }; template <typename T, typename L> ::testing::AssertionResult devArrMatchCustom(const T* actual, size_t size, L eq_compare, cudaStream_t stream = 0) { std::vector<T> act_h(size); raft::update_host<T>(&(act_h[0]), actual, size, stream); raft::interruptible::synchronize(stream); for (size_t i(0); i < size; ++i) { auto act = act_h[i]; auto expected = (T)i; if (!eq_compare(expected, act)) { return ::testing::AssertionFailure() << "actual=" << act << " != expected=" << expected << " @" << i; } } return ::testing::AssertionSuccess(); } const std::vector<DlbInputs> inputs = {{4}, {16}, {64}, {256}, {2048}}; TEST_P(DlbTest, Result) { ASSERT_TRUE(devArrMatchCustom(out.data(), params.len, MLCommon::Compare<int>())); } INSTANTIATE_TEST_CASE_P(DlbTests, DlbTest, ::testing::ValuesIn(inputs)); } // end namespace MLCommon
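In dlbTestKernel above, each block contributes a count of 1 (from its last thread), so the exclusive prefix handed back to block b is simply b, which is what devArrMatchCustom verifies by comparing out[i] against (T)i. A tiny CPU model of that expectation, as a hedged sketch:

// CPU model of what DlbTest expects: with every block contributing a count
// of 1, the decoupled look-back's exclusive prefix for block b is b.
#include <vector>
#include <cassert>

int main() {
    const int nblks = 256;              // matches one of the DlbInputs sizes
    std::vector<int> expected(nblks);
    int running = 0;
    for (int b = 0; b < nblks; ++b) {
        expected[b] = running;          // exclusive prefix seen by block b
        running += 1;                   // each block adds its count of 1
    }
    for (int b = 0; b < nblks; ++b) assert(expected[b] == b);
    return 0;
}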
b21477e3874a3f02e014a10e1f2ea0d1e2207725.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 // You can use any other block size you wish. #define BLOCK_SIZE 1024 #define TILE_SIZE 2048 unsigned int *sumArray, *sumArray2, *sumArray3; int blocksL1, blocksL2, blocksL3; // Host Helper Functions (allocate your own data structure...) void preallocBlockSums(int numElements){ blocksL1 = (int)ceil(numElements/(float)TILE_SIZE); hipMalloc((void**) &sumArray, sizeof(unsigned int)*blocksL1); blocksL2 = (int)ceil(blocksL1/(float)TILE_SIZE); hipMalloc((void**) &sumArray2, sizeof(unsigned int)*blocksL2); blocksL3 = (int)ceil(blocksL2/(float)TILE_SIZE); hipMalloc((void**) &sumArray3, sizeof(unsigned int)*blocksL3); } void deallocBlockSums(){ hipFree(sumArray); hipFree(sumArray2); hipFree(sumArray3); } // Device Functions // Kernel Functions __global__ void scanArray(unsigned int *outArray, unsigned int *inArray, unsigned int *sumArray, int numElements){ __shared__ unsigned int tileArray[TILE_SIZE]; int index = blockIdx.x*TILE_SIZE + threadIdx.x; if(index < numElements && (threadIdx.x!=0 || blockIdx.x!=0)) tileArray[threadIdx.x] = inArray[index-1]; else tileArray[threadIdx.x] = 0; if(index+BLOCK_SIZE < numElements) tileArray[threadIdx.x + BLOCK_SIZE] = inArray[index-1 + BLOCK_SIZE]; else tileArray[threadIdx.x + BLOCK_SIZE] = 0; unsigned int id, stride; for(stride=1;stride<TILE_SIZE;stride *= 2){ __syncthreads(); id = (threadIdx.x+1) * 2 * stride - 1; if(id<TILE_SIZE) tileArray[id] += tileArray[id-stride]; } for(stride=TILE_SIZE/4;stride>0;stride /= 2){ __syncthreads(); id = (threadIdx.x+1) * 2 * stride - 1; if(id + stride < TILE_SIZE) tileArray[id+stride] += tileArray[id]; } __syncthreads(); if(threadIdx.x==0) sumArray[blockIdx.x] = tileArray[TILE_SIZE-1]; if(index < numElements) outArray[index] = tileArray[threadIdx.x]; if(index + BLOCK_SIZE < numElements) outArray[index+BLOCK_SIZE] = tileArray[threadIdx.x+BLOCK_SIZE]; } __global__ void vectorAddition(unsigned int *vector, unsigned int *sumVector, int numElements){ int index = blockIdx.x*TILE_SIZE + threadIdx.x; if(index < numElements){ vector[index] += sumVector[blockIdx.x]; } if(index + BLOCK_SIZE < numElements){ vector[index + BLOCK_SIZE] += sumVector[blockIdx.x]; } } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. Make your own kernel // functions in this file, and then call them from here. 
// Note that the code has been modified to ensure numElements is a multiple // of TILE_SIZE void prescanArray(unsigned int *outArray, unsigned int *inArray, int numElements) { hipLaunchKernelGGL(( scanArray), dim3(blocksL1), dim3(BLOCK_SIZE), 0, 0, outArray, inArray, sumArray, numElements); if(blocksL1 > 1){ // execute level 2 if more than one block in level 1 hipLaunchKernelGGL(( scanArray), dim3(blocksL2), dim3(BLOCK_SIZE), 0, 0, sumArray, sumArray, sumArray2, blocksL1); if(blocksL2 > 1){ // execute level 3 if more than one block in level 2 // this should ideally have just one block hipLaunchKernelGGL(( scanArray), dim3(blocksL3), dim3(BLOCK_SIZE), 0, 0, sumArray2, sumArray2, sumArray3, blocksL2); hipLaunchKernelGGL(( vectorAddition), dim3(blocksL2), dim3(BLOCK_SIZE), 0, 0, sumArray, sumArray2, blocksL1); } hipLaunchKernelGGL(( vectorAddition), dim3(blocksL1), dim3(BLOCK_SIZE), 0, 0, outArray, sumArray, numElements); } } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
b21477e3874a3f02e014a10e1f2ea0d1e2207725.cu
#ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 // You can use any other block size you wish. #define BLOCK_SIZE 1024 #define TILE_SIZE 2048 unsigned int *sumArray, *sumArray2, *sumArray3; int blocksL1, blocksL2, blocksL3; // Host Helper Functions (allocate your own data structure...) void preallocBlockSums(int numElements){ blocksL1 = (int)ceil(numElements/(float)TILE_SIZE); cudaMalloc((void**) &sumArray, sizeof(unsigned int)*blocksL1); blocksL2 = (int)ceil(blocksL1/(float)TILE_SIZE); cudaMalloc((void**) &sumArray2, sizeof(unsigned int)*blocksL2); blocksL3 = (int)ceil(blocksL2/(float)TILE_SIZE); cudaMalloc((void**) &sumArray3, sizeof(unsigned int)*blocksL3); } void deallocBlockSums(){ cudaFree(sumArray); cudaFree(sumArray2); cudaFree(sumArray3); } // Device Functions // Kernel Functions __global__ void scanArray(unsigned int *outArray, unsigned int *inArray, unsigned int *sumArray, int numElements){ __shared__ unsigned int tileArray[TILE_SIZE]; int index = blockIdx.x*TILE_SIZE + threadIdx.x; if(index < numElements && (threadIdx.x!=0 || blockIdx.x!=0)) tileArray[threadIdx.x] = inArray[index-1]; else tileArray[threadIdx.x] = 0; if(index+BLOCK_SIZE < numElements) tileArray[threadIdx.x + BLOCK_SIZE] = inArray[index-1 + BLOCK_SIZE]; else tileArray[threadIdx.x + BLOCK_SIZE] = 0; unsigned int id, stride; for(stride=1;stride<TILE_SIZE;stride *= 2){ __syncthreads(); id = (threadIdx.x+1) * 2 * stride - 1; if(id<TILE_SIZE) tileArray[id] += tileArray[id-stride]; } for(stride=TILE_SIZE/4;stride>0;stride /= 2){ __syncthreads(); id = (threadIdx.x+1) * 2 * stride - 1; if(id + stride < TILE_SIZE) tileArray[id+stride] += tileArray[id]; } __syncthreads(); if(threadIdx.x==0) sumArray[blockIdx.x] = tileArray[TILE_SIZE-1]; if(index < numElements) outArray[index] = tileArray[threadIdx.x]; if(index + BLOCK_SIZE < numElements) outArray[index+BLOCK_SIZE] = tileArray[threadIdx.x+BLOCK_SIZE]; } __global__ void vectorAddition(unsigned int *vector, unsigned int *sumVector, int numElements){ int index = blockIdx.x*TILE_SIZE + threadIdx.x; if(index < numElements){ vector[index] += sumVector[blockIdx.x]; } if(index + BLOCK_SIZE < numElements){ vector[index + BLOCK_SIZE] += sumVector[blockIdx.x]; } } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. Make your own kernel // functions in this file, and then call them from here. // Note that the code has been modified to ensure numElements is a multiple // of TILE_SIZE void prescanArray(unsigned int *outArray, unsigned int *inArray, int numElements) { scanArray<<<blocksL1, BLOCK_SIZE>>>(outArray, inArray, sumArray, numElements); if(blocksL1 > 1){ // execute level 2 if more than one block in level 1 scanArray<<<blocksL2, BLOCK_SIZE>>>(sumArray, sumArray, sumArray2, blocksL1); if(blocksL2 > 1){ // execute level 3 if more than one block in level 2 // this should ideally have just one block scanArray<<<blocksL3, BLOCK_SIZE>>>(sumArray2, sumArray2, sumArray3, blocksL2); vectorAddition<<<blocksL2, BLOCK_SIZE>>>(sumArray, sumArray2, blocksL1); } vectorAddition<<<blocksL1, BLOCK_SIZE>>>(outArray, sumArray, numElements); } } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
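prescanArray above computes an exclusive scan hierarchically: scanArray produces a per-tile exclusive scan plus one total per tile, those totals are scanned recursively (up to three levels), and vectorAddition adds each tile's scanned total back onto its elements. A short CPU reference of the exclusive scan the whole pipeline is meant to produce, useful as a host-side check (a hedged sketch; the helper name is not part of the original code):

// CPU reference for the exclusive scan computed by prescanArray above.
#include <vector>

std::vector<unsigned int> exclusiveScan(const std::vector<unsigned int>& in) {
    std::vector<unsigned int> out(in.size());
    unsigned int running = 0;
    for (size_t i = 0; i < in.size(); ++i) {
        out[i] = running;     // element i gets the sum of everything before it
        running += in[i];
    }
    return out;
}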
cf3bc80f242b86bb80fde64dcccc4fa2ba1fda7b.hip
// !!! This is a file automatically generated by hipify!!! /* other things we should test: - struct pointer, with offset - multiple struct pointers, cut from same buffer - getting values from various types of structs passed in */ #include <iostream> #include <memory> #include <cassert> using namespace std; #include <hip/hip_runtime.h> struct Struct_fp_fp_f_f { float *p1; float *p2; float f1; float f2; }; struct Struct_fp { float *p1; }; struct Struct_1float { float f1; }; struct Struct_2floats { float f1; float f2; }; __global__ void struct_byvalue(struct Struct_fp_fp_f_f mystruct, float *out) { out[0] = mystruct.f1; out[1] = mystruct.f2; mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; } void testbyvaluestruct() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *gpuFloats1; hipMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; hipMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; hipMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *gpuOut; hipMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; float *hostOut = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f}; hipLaunchKernelGGL(( struct_byvalue), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, mystruct, (float *)gpuOut); hipMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hostOut, gpuOut, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostOut[0] == 3); assert(hostOut[1] == 8); hipFree(gpuFloats1); hipFree(gpuFloats2); hipFree(gpuFloats3); hipFree(gpuOut); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; delete[]hostOut; hipStreamDestroy(stream); } __global__ void struct_aspointer(struct Struct_2floats *mystruct, float *out) { out[0] = mystruct->f1; out[1] = mystruct->f2; } void testaspointerstruct() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *hostOut = new float[N]; float *gpuOut; hipMalloc((void**)(&gpuOut), N * sizeof(float)); struct Struct_2floats mystruct = { 5, 7 }; struct Struct_2floats *gpu_mystruct; hipMalloc((void**)(&gpu_mystruct), sizeof(mystruct)); hipMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), hipMemcpyHostToDevice); hipLaunchKernelGGL(( struct_aspointer), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, gpu_mystruct, gpuOut); hipMemcpy(hostOut, gpuOut, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); delete[]hostOut; hipStreamDestroy(stream); } __global__ void kernel_twostructs(struct Struct_fp_fp_f_f mystruct, struct Struct_fp mystruct2) { mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; mystruct2.p1[0] = 11.0f; } void testtwostructs() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *gpuFloats1; hipMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; hipMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; hipMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new 
float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 0.0f, 0.0f}; struct Struct_fp mystruct2 = {(float *)gpuFloats3}; hipLaunchKernelGGL(( kernel_twostructs), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, mystruct, mystruct2); hipMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hostFloats3, gpuFloats3, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostFloats3[0] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostFloats3[0] == 11); hipFree(gpuFloats1); hipFree(gpuFloats2); hipFree(gpuFloats3); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; hipStreamDestroy(stream); } __global__ void kernel_structbyval_noptrs(struct Struct_1float mystruct1, float *out) { if(threadIdx.x == 0) { out[0] = mystruct1.f1; out[1] = 5; } } void teststructbyvalNoPtr() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; hipMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_1float mystruct1 = {8.0f}; hipLaunchKernelGGL(( kernel_structbyval_noptrs), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, mystruct1, (float *)gpuFloats1); hipMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; assert(hostFloats1[0] == 8); assert(hostFloats1[1] == 5); delete[] hostFloats1; hipFree(gpuFloats1); hipStreamDestroy(stream); } __global__ void kernel_twostructs_noptrs(struct Struct_2floats *mystruct, struct Struct_1float *mystruct2, struct Struct_1float mystruct3, float *out) { if(threadIdx.x == 0) { out[0] = mystruct->f1; out[1] = mystruct->f2; out[2] = mystruct2->f1; out[3] = mystruct3.f1; } } void test_twostructs_byptr_NoPtr() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; hipMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_2floats mystruct = {5.0f, 6.0f}; struct Struct_1float mystruct2 = {7.0f}; struct Struct_1float mystruct3 = {8.0f}; struct Struct_2floats *gpu_mystruct; hipMalloc((void**)(&gpu_mystruct), sizeof(mystruct)); hipMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), hipMemcpyHostToDevice); struct Struct_1float *gpu_mystruct2; hipMalloc((void**)(&gpu_mystruct2), sizeof(mystruct2)); hipMemcpy(gpu_mystruct2, &mystruct2, sizeof(mystruct2), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_twostructs_noptrs), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, gpu_mystruct, gpu_mystruct2, mystruct3, (float *)gpuFloats1); hipMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; cout << hostFloats1[2] << endl; cout << hostFloats1[3] << endl; assert(hostFloats1[0] == 5); assert(hostFloats1[1] == 6); assert(hostFloats1[2] == 7); assert(hostFloats1[3] == 8); hipFree(gpuFloats1); hipFree(gpu_mystruct); hipFree(gpu_mystruct2); // hipFree(gpu_mystruct3); delete[] hostFloats1; hipStreamDestroy(stream); } __global__ void kernel_struct2byval_noptrs(struct Struct_2floats mystruct1, float *out) { if(threadIdx.x == 0) { out[0] = mystruct1.f1; out[1] = mystruct1.f2; } } void teststruct2byvalNoPtr() { int N = 1024; hipStream_t stream; 
hipStreamCreate__(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; hipMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_2floats mystruct1 = {8.0f, 9.0f}; hipLaunchKernelGGL(( kernel_struct2byval_noptrs), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, mystruct1, (float *)gpuFloats1); hipMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; assert(hostFloats1[0] == 8); assert(hostFloats1[1] == 9); delete[] hostFloats1; hipFree(gpuFloats1); hipStreamDestroy(stream); } struct struct_f_c_f_c { float f1; char c1; float f2; char c2; }; __global__ void kernel_twostructs_gpuside_singlebuffer(struct struct_f_c_f_c *mystruct1, struct struct_f_c_f_c *mystruct2, float *out) { out[0] = mystruct1->f1; out[1] = mystruct1->f2; out[2] = mystruct2->f1; out[3] = mystruct2->f2; } void test_twostructs_gpuside_singlebuffer() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *hostOut = new float[N]; float *gpuOut; hipMalloc((void**)(&gpuOut), N * sizeof(float)); char *gpubuf; hipMalloc((void **)&gpubuf, 1024); int offset1 = 24; int offset2 = 40; struct struct_f_c_f_c mystruct1 = { 5, 0, 7, 0 }; hipMemcpy(gpubuf + offset1, &mystruct1, sizeof(mystruct1), hipMemcpyHostToDevice); struct struct_f_c_f_c mystruct2 = { 9, 0, 3, 0 }; hipMemcpy(gpubuf + offset2, &mystruct2, sizeof(mystruct2), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_twostructs_gpuside_singlebuffer), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, (struct struct_f_c_f_c *)(gpubuf + offset1), (struct struct_f_c_f_c *)(gpubuf + offset2), gpuOut); hipMemcpy(hostOut, gpuOut, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; cout << hostOut[2] << endl; cout << hostOut[3] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); assert(hostOut[2] == 9); assert(hostOut[3] == 3); delete[]hostOut; hipStreamDestroy(stream); } struct NestL2 { float floats[10]; }; struct NestL1 { struct NestL2 n1; struct NestL2 n2; }; struct NestTop { struct NestL1 n1; struct NestL1 n2; }; __global__ void kernelUseNestTop(NestTop nest, float *out) { out[0] = nest.n1.n1.floats[0]; out[1] = nest.n1.n1.floats[1]; } void testKernelUsesNestTop() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *gpuOut; hipMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostOut = new float[N]; struct NestTop nestTop; nestTop.n1.n1.floats[0] = 5; nestTop.n1.n1.floats[1] = 7; hipLaunchKernelGGL(( kernelUseNestTop), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, nestTop, (float *)gpuOut); hipMemcpy(hostOut, gpuOut, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); hipFree(gpuOut); delete[]hostOut; hipStreamDestroy(stream); } __global__ void struct_byvalue_withreadnone(struct Struct_fp_fp_f_f mystruct, struct Struct_fp_fp_f_f donothing, float *out) { out[0] = mystruct.f1; out[1] = mystruct.f2; mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; } void testbyvaluestruct_withreadnone() { int N = 1024; hipStream_t stream; hipStreamCreate__(&stream, 0); float *gpuFloats1; hipMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; hipMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; hipMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *gpuOut; 
hipMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; float *hostOut = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f}; struct Struct_fp_fp_f_f donothing = {(float *)0, (float *)0, 0.0f, 0.0f}; hipLaunchKernelGGL(( struct_byvalue_withreadnone), dim3(dim3(1,1,1)), dim3(dim3(32,1,1)), 0, stream, mystruct, donothing, (float *)gpuOut); hipMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hostOut, gpuOut, 4 * sizeof(float), hipMemcpyDeviceToHost); hipStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostOut[0] == 3); assert(hostOut[1] == 8); hipFree(gpuFloats1); hipFree(gpuFloats2); hipFree(gpuFloats3); hipFree(gpuOut); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; delete[]hostOut; hipStreamDestroy(stream); } int main(int argc, char *argv[]) { cout << "\ntestvaluestruct" << endl; testbyvaluestruct(); cout << "\ntestaspointersstruct" << endl; testaspointerstruct(); cout << "\ntesttwostructs" << endl; testtwostructs(); cout << "\teststructbyvalNoPtr" << endl; teststructbyvalNoPtr(); cout << "\ntest_twostructs_byptr_NoPtr" << endl; test_twostructs_byptr_NoPtr(); cout << "\teststruct2byvalNoPtr" << endl; teststruct2byvalNoPtr(); cout << "\test_twostructs_gpuside_singlebuffer" << endl; test_twostructs_gpuside_singlebuffer(); cout << "\ntestKernelUsesNestTop" << endl; testKernelUsesNestTop(); cout << "\ntestvaluestruct_withreadnone" << endl; testbyvaluestruct_withreadnone(); return 0; }
cf3bc80f242b86bb80fde64dcccc4fa2ba1fda7b.cu
/* other things we should test: - struct pointer, with offset - multiple struct pointers, cut from same buffer - getting values from various types of structs passed in */ #include <iostream> #include <memory> #include <cassert> using namespace std; #include <cuda.h> struct Struct_fp_fp_f_f { float *p1; float *p2; float f1; float f2; }; struct Struct_fp { float *p1; }; struct Struct_1float { float f1; }; struct Struct_2floats { float f1; float f2; }; __global__ void struct_byvalue(struct Struct_fp_fp_f_f mystruct, float *out) { out[0] = mystruct.f1; out[1] = mystruct.f2; mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; } void testbyvaluestruct() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; cudaMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; cudaMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; float *hostOut = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f}; struct_byvalue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, (float *)gpuOut); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostOut[0] == 3); assert(hostOut[1] == 8); cudaFree(gpuFloats1); cudaFree(gpuFloats2); cudaFree(gpuFloats3); cudaFree(gpuOut); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; delete[]hostOut; cuStreamDestroy(stream); } __global__ void struct_aspointer(struct Struct_2floats *mystruct, float *out) { out[0] = mystruct->f1; out[1] = mystruct->f2; } void testaspointerstruct() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostOut = new float[N]; float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); struct Struct_2floats mystruct = { 5, 7 }; struct Struct_2floats *gpu_mystruct; cudaMalloc((void**)(&gpu_mystruct), sizeof(mystruct)); cudaMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), cudaMemcpyHostToDevice); struct_aspointer<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(gpu_mystruct, gpuOut); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); delete[]hostOut; cuStreamDestroy(stream); } __global__ void kernel_twostructs(struct Struct_fp_fp_f_f mystruct, struct Struct_fp mystruct2) { mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; mystruct2.p1[0] = 11.0f; } void testtwostructs() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; cudaMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; cudaMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 0.0f, 0.0f}; struct Struct_fp mystruct2 = {(float 
*)gpuFloats3}; kernel_twostructs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, mystruct2); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats3, gpuFloats3, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostFloats3[0] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostFloats3[0] == 11); cudaFree(gpuFloats1); cudaFree(gpuFloats2); cudaFree(gpuFloats3); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; cuStreamDestroy(stream); } __global__ void kernel_structbyval_noptrs(struct Struct_1float mystruct1, float *out) { if(threadIdx.x == 0) { out[0] = mystruct1.f1; out[1] = 5; } } void teststructbyvalNoPtr() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_1float mystruct1 = {8.0f}; kernel_structbyval_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct1, (float *)gpuFloats1); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; assert(hostFloats1[0] == 8); assert(hostFloats1[1] == 5); delete[] hostFloats1; cudaFree(gpuFloats1); cuStreamDestroy(stream); } __global__ void kernel_twostructs_noptrs(struct Struct_2floats *mystruct, struct Struct_1float *mystruct2, struct Struct_1float mystruct3, float *out) { if(threadIdx.x == 0) { out[0] = mystruct->f1; out[1] = mystruct->f2; out[2] = mystruct2->f1; out[3] = mystruct3.f1; } } void test_twostructs_byptr_NoPtr() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_2floats mystruct = {5.0f, 6.0f}; struct Struct_1float mystruct2 = {7.0f}; struct Struct_1float mystruct3 = {8.0f}; struct Struct_2floats *gpu_mystruct; cudaMalloc((void**)(&gpu_mystruct), sizeof(mystruct)); cudaMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), cudaMemcpyHostToDevice); struct Struct_1float *gpu_mystruct2; cudaMalloc((void**)(&gpu_mystruct2), sizeof(mystruct2)); cudaMemcpy(gpu_mystruct2, &mystruct2, sizeof(mystruct2), cudaMemcpyHostToDevice); kernel_twostructs_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(gpu_mystruct, gpu_mystruct2, mystruct3, (float *)gpuFloats1); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; cout << hostFloats1[2] << endl; cout << hostFloats1[3] << endl; assert(hostFloats1[0] == 5); assert(hostFloats1[1] == 6); assert(hostFloats1[2] == 7); assert(hostFloats1[3] == 8); cudaFree(gpuFloats1); cudaFree(gpu_mystruct); cudaFree(gpu_mystruct2); // cudaFree(gpu_mystruct3); delete[] hostFloats1; cuStreamDestroy(stream); } __global__ void kernel_struct2byval_noptrs(struct Struct_2floats mystruct1, float *out) { if(threadIdx.x == 0) { out[0] = mystruct1.f1; out[1] = mystruct1.f2; } } void teststruct2byvalNoPtr() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_2floats mystruct1 = {8.0f, 9.0f}; kernel_struct2byval_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, 
stream>>>(mystruct1, (float *)gpuFloats1); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; assert(hostFloats1[0] == 8); assert(hostFloats1[1] == 9); delete[] hostFloats1; cudaFree(gpuFloats1); cuStreamDestroy(stream); } struct struct_f_c_f_c { float f1; char c1; float f2; char c2; }; __global__ void kernel_twostructs_gpuside_singlebuffer(struct struct_f_c_f_c *mystruct1, struct struct_f_c_f_c *mystruct2, float *out) { out[0] = mystruct1->f1; out[1] = mystruct1->f2; out[2] = mystruct2->f1; out[3] = mystruct2->f2; } void test_twostructs_gpuside_singlebuffer() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostOut = new float[N]; float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); char *gpubuf; cudaMalloc((void **)&gpubuf, 1024); int offset1 = 24; int offset2 = 40; struct struct_f_c_f_c mystruct1 = { 5, 0, 7, 0 }; cudaMemcpy(gpubuf + offset1, &mystruct1, sizeof(mystruct1), cudaMemcpyHostToDevice); struct struct_f_c_f_c mystruct2 = { 9, 0, 3, 0 }; cudaMemcpy(gpubuf + offset2, &mystruct2, sizeof(mystruct2), cudaMemcpyHostToDevice); kernel_twostructs_gpuside_singlebuffer<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>( (struct struct_f_c_f_c *)(gpubuf + offset1), (struct struct_f_c_f_c *)(gpubuf + offset2), gpuOut); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; cout << hostOut[2] << endl; cout << hostOut[3] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); assert(hostOut[2] == 9); assert(hostOut[3] == 3); delete[]hostOut; cuStreamDestroy(stream); } struct NestL2 { float floats[10]; }; struct NestL1 { struct NestL2 n1; struct NestL2 n2; }; struct NestTop { struct NestL1 n1; struct NestL1 n2; }; __global__ void kernelUseNestTop(NestTop nest, float *out) { out[0] = nest.n1.n1.floats[0]; out[1] = nest.n1.n1.floats[1]; } void testKernelUsesNestTop() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostOut = new float[N]; struct NestTop nestTop; nestTop.n1.n1.floats[0] = 5; nestTop.n1.n1.floats[1] = 7; kernelUseNestTop<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(nestTop, (float *)gpuOut); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); cudaFree(gpuOut); delete[]hostOut; cuStreamDestroy(stream); } __global__ void struct_byvalue_withreadnone(struct Struct_fp_fp_f_f mystruct, struct Struct_fp_fp_f_f donothing, float *out) { out[0] = mystruct.f1; out[1] = mystruct.f2; mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; } void testbyvaluestruct_withreadnone() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; cudaMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; cudaMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; float *hostOut = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f}; struct Struct_fp_fp_f_f donothing = {(float *)0, (float *)0, 0.0f, 0.0f}; 
struct_byvalue_withreadnone<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, donothing, (float *)gpuOut); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostOut[0] == 3); assert(hostOut[1] == 8); cudaFree(gpuFloats1); cudaFree(gpuFloats2); cudaFree(gpuFloats3); cudaFree(gpuOut); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; delete[]hostOut; cuStreamDestroy(stream); } int main(int argc, char *argv[]) { cout << "\ntestvaluestruct" << endl; testbyvaluestruct(); cout << "\ntestaspointersstruct" << endl; testaspointerstruct(); cout << "\ntesttwostructs" << endl; testtwostructs(); cout << "\teststructbyvalNoPtr" << endl; teststructbyvalNoPtr(); cout << "\ntest_twostructs_byptr_NoPtr" << endl; test_twostructs_byptr_NoPtr(); cout << "\teststruct2byvalNoPtr" << endl; teststruct2byvalNoPtr(); cout << "\test_twostructs_gpuside_singlebuffer" << endl; test_twostructs_gpuside_singlebuffer(); cout << "\ntestKernelUsesNestTop" << endl; testKernelUsesNestTop(); cout << "\ntestvaluestruct_withreadnone" << endl; testbyvaluestruct_withreadnone(); return 0; }
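The pair above tests kernel parameters that are structs passed by value (including structs whose members are device pointers) and structs passed by device pointer. A minimal self-contained sketch of the by-value case follows; the struct, kernel and variable names are hypothetical:

// Minimal sketch of passing a struct by value whose member is a device
// pointer, as exercised by struct_byvalue above (names are hypothetical).
#include <cuda_runtime.h>
#include <cstdio>

struct Params { float *out; float scale; };

__global__ void writeScaled(Params p) {
    // The struct is copied into the kernel argument space; p.out still
    // points to device memory allocated by the host.
    p.out[threadIdx.x] = p.scale * threadIdx.x;
}

int main() {
    float *dOut, hOut[32];
    cudaMalloc(&dOut, 32 * sizeof(float));
    Params p = { dOut, 2.0f };
    writeScaled<<<1, 32>>>(p);
    cudaMemcpy(hOut, dOut, 32 * sizeof(float), cudaMemcpyDeviceToHost);
    printf("hOut[3] = %f (expected 6.0)\n", hOut[3]);
    cudaFree(dOut);
    return 0;
}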
95ea40b92db8d815bf73623902b200dfbdfedc76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_z3; int xdim0_advec_mom_kernel_z3_h = -1; __constant__ int ydim0_advec_mom_kernel_z3; int ydim0_advec_mom_kernel_z3_h = -1; __constant__ int xdim1_advec_mom_kernel_z3; int xdim1_advec_mom_kernel_z3_h = -1; __constant__ int ydim1_advec_mom_kernel_z3; int ydim1_advec_mom_kernel_z3_h = -1; __constant__ int xdim2_advec_mom_kernel_z3; int xdim2_advec_mom_kernel_z3_h = -1; __constant__ int ydim2_advec_mom_kernel_z3; int ydim2_advec_mom_kernel_z3_h = -1; __constant__ int xdim3_advec_mom_kernel_z3; int xdim3_advec_mom_kernel_z3_h = -1; __constant__ int ydim3_advec_mom_kernel_z3; int ydim3_advec_mom_kernel_z3_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_z3*(y)+xdim0_advec_mom_kernel_z3*ydim0_advec_mom_kernel_z3*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_z3*(y)+xdim1_advec_mom_kernel_z3*ydim1_advec_mom_kernel_z3*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_z3*(y)+xdim2_advec_mom_kernel_z3*ydim2_advec_mom_kernel_z3*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_z3*(y)+xdim3_advec_mom_kernel_z3*ydim3_advec_mom_kernel_z3*(z)) //user function __device__ inline void advec_mom_kernel_z3_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_z) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)]; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_z[OPS_ACC3(0,0,1)] - vol_flux_z[OPS_ACC3(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel_z3( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_z3 + idx_z * 1*1 * xdim0_advec_mom_kernel_z3 * ydim0_advec_mom_kernel_z3; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_z3 + idx_z * 1*1 * xdim1_advec_mom_kernel_z3 * ydim1_advec_mom_kernel_z3; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_z3 + idx_z * 1*1 * xdim2_advec_mom_kernel_z3 * ydim2_advec_mom_kernel_z3; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_z3 + idx_z * 1*1 * xdim3_advec_mom_kernel_z3 * ydim3_advec_mom_kernel_z3; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_z3_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_z3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,125)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); OPS_kernels[125].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = 
OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_z3_h || ydim0 != ydim0_advec_mom_kernel_z3_h || xdim1 != xdim1_advec_mom_kernel_z3_h || ydim1 != ydim1_advec_mom_kernel_z3_h || xdim2 != xdim2_advec_mom_kernel_z3_h || ydim2 != ydim2_advec_mom_kernel_z3_h || xdim3 != xdim3_advec_mom_kernel_z3_h || ydim3 != ydim3_advec_mom_kernel_z3_h) { hipMemcpyToSymbol( xdim0_advec_mom_kernel_z3, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_z3_h = xdim0; hipMemcpyToSymbol( ydim0_advec_mom_kernel_z3, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_z3_h = ydim0; hipMemcpyToSymbol( xdim1_advec_mom_kernel_z3, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_z3_h = xdim1; hipMemcpyToSymbol( ydim1_advec_mom_kernel_z3, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_z3_h = ydim1; hipMemcpyToSymbol( xdim2_advec_mom_kernel_z3, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_z3_h = xdim2; hipMemcpyToSymbol( ydim2_advec_mom_kernel_z3, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_z3_h = ydim2; hipMemcpyToSymbol( xdim3_advec_mom_kernel_z3, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_z3_h = xdim3; hipMemcpyToSymbol( ydim3_advec_mom_kernel_z3, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_z3_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_z3), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[125].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 125; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 125; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; 
desc->function = ops_par_loop_advec_mom_kernel_z3_execute; if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); } ops_enqueue_kernel(desc); } #endif
95ea40b92db8d815bf73623902b200dfbdfedc76.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_z3; int xdim0_advec_mom_kernel_z3_h = -1; __constant__ int ydim0_advec_mom_kernel_z3; int ydim0_advec_mom_kernel_z3_h = -1; __constant__ int xdim1_advec_mom_kernel_z3; int xdim1_advec_mom_kernel_z3_h = -1; __constant__ int ydim1_advec_mom_kernel_z3; int ydim1_advec_mom_kernel_z3_h = -1; __constant__ int xdim2_advec_mom_kernel_z3; int xdim2_advec_mom_kernel_z3_h = -1; __constant__ int ydim2_advec_mom_kernel_z3; int ydim2_advec_mom_kernel_z3_h = -1; __constant__ int xdim3_advec_mom_kernel_z3; int xdim3_advec_mom_kernel_z3_h = -1; __constant__ int ydim3_advec_mom_kernel_z3; int ydim3_advec_mom_kernel_z3_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_z3*(y)+xdim0_advec_mom_kernel_z3*ydim0_advec_mom_kernel_z3*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_z3*(y)+xdim1_advec_mom_kernel_z3*ydim1_advec_mom_kernel_z3*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_z3*(y)+xdim2_advec_mom_kernel_z3*ydim2_advec_mom_kernel_z3*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_z3*(y)+xdim3_advec_mom_kernel_z3*ydim3_advec_mom_kernel_z3*(z)) //user function __device__ inline void advec_mom_kernel_z3_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_z) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)]; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_z[OPS_ACC3(0,0,1)] - vol_flux_z[OPS_ACC3(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel_z3( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_z3 + idx_z * 1*1 * xdim0_advec_mom_kernel_z3 * ydim0_advec_mom_kernel_z3; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_z3 + idx_z * 1*1 * xdim1_advec_mom_kernel_z3 * ydim1_advec_mom_kernel_z3; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_z3 + idx_z * 1*1 * xdim2_advec_mom_kernel_z3 * ydim2_advec_mom_kernel_z3; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_z3 + idx_z * 1*1 * xdim3_advec_mom_kernel_z3 * ydim3_advec_mom_kernel_z3; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_z3_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_z3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,125)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); OPS_kernels[125].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = 
sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_z3_h || ydim0 != ydim0_advec_mom_kernel_z3_h || xdim1 != xdim1_advec_mom_kernel_z3_h || ydim1 != ydim1_advec_mom_kernel_z3_h || xdim2 != xdim2_advec_mom_kernel_z3_h || ydim2 != ydim2_advec_mom_kernel_z3_h || xdim3 != xdim3_advec_mom_kernel_z3_h || ydim3 != ydim3_advec_mom_kernel_z3_h) { cudaMemcpyToSymbol( xdim0_advec_mom_kernel_z3, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_z3_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_mom_kernel_z3, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_z3_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_mom_kernel_z3, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_z3_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_mom_kernel_z3, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_z3_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_mom_kernel_z3, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_z3_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_mom_kernel_z3, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_z3_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_mom_kernel_z3, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_z3_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_mom_kernel_z3, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_z3_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_z3<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[125].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[125].mpi_time += t2-t1; OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[125].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_z3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 125; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 125; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_advec_mom_kernel_z3_execute; if (OPS_diags > 1) { ops_timing_realloc(125,"advec_mom_kernel_z3"); } ops_enqueue_kernel(desc); } #endif
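// The generated loop above caches each grid dimension on the host (the *_h shadow
// variables) and re-uploads the __constant__ symbols only when a dimension changes.
// A minimal sketch of that caching pattern, with illustrative names not taken from
// the files above:
//
//   __constant__ int xdim0;      // device-side copy read by the kernel
//   static int xdim0_h = -1;     // host-side cache of the last uploaded value
//
//   if (xdim0_new != xdim0_h) {
//     hipMemcpyToSymbol(xdim0, &xdim0_new, sizeof(int));   // CUDA version uses cudaMemcpyToSymbol
//     xdim0_h = xdim0_new;
//   }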
d5ba443153a6a2948619a0bb62a990db035780a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpuPathtracer.h" #include "../static_scene/triangle.h" #include "../static_scene/object.h" #include "gpuRay.cu" #include "gpuTriangle.cu" #include "gpuVector3D.cu" #include "gpuCamera.cu" //#include "gpuBvh.cu" #include "gpuBBox.cu" #ifdef DEBUG #define CHK(ans) {gpuAssert((ans), __FILE__, __LINE__);} #define POSTKERNEL CHK(hipPeekAtLastError()) #else #define CHK(ans) #endif inline void gpuAssert(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %s\n", hipGetErrorString(code),file,line); exit(code); } } using namespace CMU462; using namespace StaticScene; __constant__ gpuTriangle* primitives; //__constant__ gpuCamera* camera_const; __constant__ int* imagePixels_const; __constant__ size_t w_d; __constant__ size_t h_d; __constant__ size_t numPrim; __constant__ gpuVector3D* pos; int* imagePixels; gpuCamera* camera; gpuTriangle* gpu_primitives; gpuVector3D *pos_d; // returns the result of ray tracing intersection with the scene primitives __device__ int trace_ray(gpuRay ray) { for(size_t i = 0; i < numPrim; i++) { if(primitives[i].intersect(ray)) return 1; } return 0; } // Using the x and y position of the pixel, create a ray and use trace_ray __device__ int raytrace_pixel(size_t x, size_t y,gpuCamera* cam) { gpuVector3D p((x + 0.5)/w_d,(y + 0.5)/h_d,0); return trace_ray(cam->generate_ray(p.x,p.y)); } // kernel for doing raytracing __global__ void render(gpuCamera* cam) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t x = index % w_d; size_t y = index / w_d; //printf("%u\n", index); if(x < w_d && y < h_d) { imagePixels_const[index] = raytrace_pixel(x,y,cam); } } gpuPathTracer::gpuPathTracer(PathTracer *__pathtracer) { pathtracer = __pathtracer; } gpuPathTracer::~gpuPathTracer() { hipFree(camera); hipFree(imagePixels); hipFree(gpu_primitives); hipFree(pos_d); } void gpuPathTracer::load_scene() { timer.start(); // using the CPU's bvh, load the mesh information size_t num_tri = pathtracer->bvh->primitives.size(); printf("Triangles in scene: %u\n",num_tri); // Allocate the primitives/vertices hipMalloc((void**)&pos_d,sizeof(gpuVector3D) * num_tri*3); hipMalloc((void**)&gpu_primitives,sizeof(gpuTriangle) * num_tri); hipMemcpyToSymbol(pos,&pos_d,sizeof(gpuVector3D*)); hipMemcpyToSymbol(primitives,&gpu_primitives,sizeof(gpuTriangle*)); hipMemcpyToSymbol(numPrim,&num_tri,sizeof(size_t)); // Copy over the triangles and their vertices into gpu versions gpuTriangle* temp_tri = new gpuTriangle[num_tri]; gpuVector3D* pos_temp = new gpuVector3D[num_tri*3]; for(size_t i = 0; i < num_tri; i++) { // Get the triangle and its vectors size_t offset = i * 3; Triangle* tri = ((Triangle*)(pathtracer->bvh->primitives[i])); Vector3D p1 = tri->mesh->positions[tri->v1]; Vector3D p2 = tri->mesh->positions[tri->v2]; Vector3D p3 = tri->mesh->positions[tri->v3]; // Copy over the vectors pos_temp[offset] = gpuVector3D(p1.x,p1.y,p1.z); pos_temp[offset+1] = gpuVector3D(p2.x,p2.y,p2.z); pos_temp[offset+2] = gpuVector3D(p3.x,p3.y,p3.z); // Create the gpuTriangle object temp_tri[i] = gpuTriangle(pos_d,offset,offset+1,offset+2); } // Copy over the vertices and normals of the triangles hipMemcpy(pos_d, pos_temp, sizeof(gpuVector3D) * num_tri*3, hipMemcpyHostToDevice); hipMemcpy(gpu_primitives,temp_tri,sizeof(gpuTriangle) * num_tri, hipMemcpyHostToDevice); timer.stop(); printf("[GPU Pathtracer]: finished loading scene (%.4f sec)\n",timer.duration()); } void 
gpuPathTracer::load_camera(Camera *cam) { gpuCamera temp = gpuCamera(cam->c2w, cam->position(), cam->screenW, cam->screenH, cam->screenDist); hipMalloc((void**)&camera,sizeof(gpuCamera)); hipMemcpy(camera,&temp,sizeof(gpuCamera),hipMemcpyHostToDevice); hipMemcpy(&temp,camera,sizeof(gpuCamera),hipMemcpyDeviceToHost); printf("w %u, h %u, d %f\n",temp.screenW, temp.screenH, temp.screenDist); printf("w %u, h %u, d %f\n",cam->screenW, cam->screenH, cam->screenDist); //hipMemcpyToSymbol(camera_const,camera,sizeof(gpuCamera*),hipMemcpyHostToDevice); } void gpuPathTracer::set_frame_size(size_t width, size_t height) { w = width; h = height; hipMemcpyToSymbol(w_d,&w,sizeof(size_t)); hipMemcpyToSymbol(h_d,&h,sizeof(size_t)); // reallocate the imagePixels buffer hipMalloc((void**)&imagePixels,sizeof(int) * w * h); hipMemcpyToSymbol(imagePixels_const,&imagePixels,sizeof(int*)); } // Takes the int imagePixels and draws it on the screen as b/w pixels void gpuPathTracer::update_screen() { Color white(1, 1, 1, 1); Color black(0, 0, 0, 0); int *tmp = new int[w * h]; hipMemcpy(tmp, imagePixels, w * h * sizeof(int), hipMemcpyDeviceToHost); //copy imagePixels into pathtracer->frameBuffer for(size_t i = 0; i < h; i++) { for(size_t j = 0; j < w; j++) { if(tmp[i * w + j]) { pathtracer->frameBuffer.update_pixel(white, j, i); } else { pathtracer->frameBuffer.update_pixel(black, j, i); } } } delete[] tmp; pathtracer->doneState(); } // Wrapper for lanching the render() kernel void gpuPathTracer::start_raytrace() { timer.start(); size_t numBlocks = (w * h + 512 -1)/512; hipLaunchKernelGGL(( render), dim3(numBlocks),dim3(512), 0, 0, camera); hipDeviceSynchronize(); timer.stop(); printf("[GPU Pathtracer]: finished rendering scene (%.4f sec)\n",timer.duration()); }
d5ba443153a6a2948619a0bb62a990db035780a0.cu
#include "gpuPathtracer.h" #include "../static_scene/triangle.h" #include "../static_scene/object.h" #include "gpuRay.cu" #include "gpuTriangle.cu" #include "gpuVector3D.cu" #include "gpuCamera.cu" //#include "gpuBvh.cu" #include "gpuBBox.cu" #ifdef DEBUG #define CHK(ans) {gpuAssert((ans), __FILE__, __LINE__);} #define POSTKERNEL CHK(cudaPeekAtLastError()) #else #define CHK(ans) #endif inline void gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %s\n", cudaGetErrorString(code),file,line); exit(code); } } using namespace CMU462; using namespace StaticScene; __constant__ gpuTriangle* primitives; //__constant__ gpuCamera* camera_const; __constant__ int* imagePixels_const; __constant__ size_t w_d; __constant__ size_t h_d; __constant__ size_t numPrim; __constant__ gpuVector3D* pos; int* imagePixels; gpuCamera* camera; gpuTriangle* gpu_primitives; gpuVector3D *pos_d; // returns the result of ray tracing intersection with the scene primitives __device__ int trace_ray(gpuRay ray) { for(size_t i = 0; i < numPrim; i++) { if(primitives[i].intersect(ray)) return 1; } return 0; } // Using the x and y position of the pixel, create a ray and use trace_ray __device__ int raytrace_pixel(size_t x, size_t y,gpuCamera* cam) { gpuVector3D p((x + 0.5)/w_d,(y + 0.5)/h_d,0); return trace_ray(cam->generate_ray(p.x,p.y)); } // kernel for doing raytracing __global__ void render(gpuCamera* cam) { size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t x = index % w_d; size_t y = index / w_d; //printf("%u\n", index); if(x < w_d && y < h_d) { imagePixels_const[index] = raytrace_pixel(x,y,cam); } } gpuPathTracer::gpuPathTracer(PathTracer *__pathtracer) { pathtracer = __pathtracer; } gpuPathTracer::~gpuPathTracer() { cudaFree(camera); cudaFree(imagePixels); cudaFree(gpu_primitives); cudaFree(pos_d); } void gpuPathTracer::load_scene() { timer.start(); // using the CPU's bvh, load the mesh information size_t num_tri = pathtracer->bvh->primitives.size(); printf("Triangles in scene: %u\n",num_tri); // Allocate the primitives/vertices cudaMalloc((void**)&pos_d,sizeof(gpuVector3D) * num_tri*3); cudaMalloc((void**)&gpu_primitives,sizeof(gpuTriangle) * num_tri); cudaMemcpyToSymbol(pos,&pos_d,sizeof(gpuVector3D*)); cudaMemcpyToSymbol(primitives,&gpu_primitives,sizeof(gpuTriangle*)); cudaMemcpyToSymbol(numPrim,&num_tri,sizeof(size_t)); // Copy over the triangles and their vertices into gpu versions gpuTriangle* temp_tri = new gpuTriangle[num_tri]; gpuVector3D* pos_temp = new gpuVector3D[num_tri*3]; for(size_t i = 0; i < num_tri; i++) { // Get the triangle and its vectors size_t offset = i * 3; Triangle* tri = ((Triangle*)(pathtracer->bvh->primitives[i])); Vector3D p1 = tri->mesh->positions[tri->v1]; Vector3D p2 = tri->mesh->positions[tri->v2]; Vector3D p3 = tri->mesh->positions[tri->v3]; // Copy over the vectors pos_temp[offset] = gpuVector3D(p1.x,p1.y,p1.z); pos_temp[offset+1] = gpuVector3D(p2.x,p2.y,p2.z); pos_temp[offset+2] = gpuVector3D(p3.x,p3.y,p3.z); // Create the gpuTriangle object temp_tri[i] = gpuTriangle(pos_d,offset,offset+1,offset+2); } // Copy over the vertices and normals of the triangles cudaMemcpy(pos_d, pos_temp, sizeof(gpuVector3D) * num_tri*3, cudaMemcpyHostToDevice); cudaMemcpy(gpu_primitives,temp_tri,sizeof(gpuTriangle) * num_tri, cudaMemcpyHostToDevice); timer.stop(); printf("[GPU Pathtracer]: finished loading scene (%.4f sec)\n",timer.duration()); } void gpuPathTracer::load_camera(Camera *cam) { gpuCamera temp = gpuCamera(cam->c2w, 
cam->position(), cam->screenW, cam->screenH, cam->screenDist); cudaMalloc((void**)&camera,sizeof(gpuCamera)); cudaMemcpy(camera,&temp,sizeof(gpuCamera),cudaMemcpyHostToDevice); cudaMemcpy(&temp,camera,sizeof(gpuCamera),cudaMemcpyDeviceToHost); printf("w %u, h %u, d %f\n",temp.screenW, temp.screenH, temp.screenDist); printf("w %u, h %u, d %f\n",cam->screenW, cam->screenH, cam->screenDist); //cudaMemcpyToSymbol(camera_const,camera,sizeof(gpuCamera*),cudaMemcpyHostToDevice); } void gpuPathTracer::set_frame_size(size_t width, size_t height) { w = width; h = height; cudaMemcpyToSymbol(w_d,&w,sizeof(size_t)); cudaMemcpyToSymbol(h_d,&h,sizeof(size_t)); // reallocate the imagePixels buffer cudaMalloc((void**)&imagePixels,sizeof(int) * w * h); cudaMemcpyToSymbol(imagePixels_const,&imagePixels,sizeof(int*)); } // Takes the int imagePixels and draws it on the screen as b/w pixels void gpuPathTracer::update_screen() { Color white(1, 1, 1, 1); Color black(0, 0, 0, 0); int *tmp = new int[w * h]; cudaMemcpy(tmp, imagePixels, w * h * sizeof(int), cudaMemcpyDeviceToHost); //copy imagePixels into pathtracer->frameBuffer for(size_t i = 0; i < h; i++) { for(size_t j = 0; j < w; j++) { if(tmp[i * w + j]) { pathtracer->frameBuffer.update_pixel(white, j, i); } else { pathtracer->frameBuffer.update_pixel(black, j, i); } } } delete[] tmp; pathtracer->doneState(); } // Wrapper for lanching the render() kernel void gpuPathTracer::start_raytrace() { timer.start(); size_t numBlocks = (w * h + 512 -1)/512; render<<<numBlocks,512>>>(camera); cudaDeviceSynchronize(); timer.stop(); printf("[GPU Pathtracer]: finished rendering scene (%.4f sec)\n",timer.duration()); }
a368e22d9b8a15a6ff5f81a27b60c450e126be5b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parameters/dumping_functions.h"
#include "common/include/forward_functions.cuh"

namespace SparseOperationKit {

void get_hash_value(size_t count, size_t embedding_vec_size,
                    const size_t *value_index, const float *embedding_table,
                    float *value_retrieved, hipStream_t stream) {
  const size_t block_size = embedding_vec_size;
  const size_t grid_size = count;
  hipLaunchKernelGGL((HugeCTR::get_hash_value_kernel), dim3(grid_size), dim3(block_size), 0, stream,
                     count, embedding_vec_size, value_index, embedding_table, value_retrieved);
}

}  // namespace SparseOperationKit
a368e22d9b8a15a6ff5f81a27b60c450e126be5b.cu
/*
 * Copyright (c) 2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parameters/dumping_functions.h"
#include "common/include/forward_functions.cuh"

namespace SparseOperationKit {

void get_hash_value(size_t count, size_t embedding_vec_size,
                    const size_t *value_index, const float *embedding_table,
                    float *value_retrieved, cudaStream_t stream) {
  const size_t block_size = embedding_vec_size;
  const size_t grid_size = count;
  HugeCTR::get_hash_value_kernel<<<grid_size, block_size, 0, stream>>>(
      count, embedding_vec_size, value_index, embedding_table, value_retrieved);
}

}  // namespace SparseOperationKit
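// The .cu/.hip pair above is a compact example of the kernel-launch rewrite hipify
// performs. A minimal sketch of the mapping, using a hypothetical kernel my_kernel
// (names and arguments are illustrative, not taken from the files above):
//
//   CUDA:  my_kernel<<<grid, block, shmem_bytes, stream>>>(arg0, arg1);
//   HIP:   hipLaunchKernelGGL((my_kernel), dim3(grid), dim3(block), shmem_bytes, stream,
//                             arg0, arg1);
//
// The triple-chevron execution configuration becomes the first five arguments of
// hipLaunchKernelGGL, and the kernel arguments follow in the same order.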
8f19ecabc92c12cb9db4a8cf1e04127719c5bfd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * GStreamer * Copyright (C) <1999> Erik Walthinsen <[email protected]> * Copyright (C) <2003> David Schleef <[email protected]> * Copyright (C) <2012> Mikhail Durnev <[email protected]> * Copyright (C) <2014> Mikhail Durnev <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Alternatively, the contents of this file may be used under the * GNU Lesser General Public License Version 2.1 (the "LGPL"), in * which case the following provisions apply instead of the ones * mentioned above: * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /** * SECTION:element-plugin * * FIXME:Describe plugin here. * * <refsect2> * <title>Example launch line</title> * |[ * gst-launch -v -m videotestsrc ! plugin ! 
autovideosink * ]| * </refsect2> */ #ifdef HAVE_CONFIG_H #include "../../common/config.h" #endif #include <gst/gst.h> #include <gst/video/video.h> #include <gst/video/gstvideofilter.h> #include <string.h> #define CUDA_CHECK_RETURN(value) { \ hipError_t stat = value; \ if (stat != hipSuccess) { \ GST_DEBUG("Error %s at line %d in file %s\n", \ hipGetErrorString(stat), __LINE__, __FILE__); \ } } typedef unsigned int uint32_t; #define PLAGIN_NAME "cudalens" #define PLAGIN_SHORT_DESCRIPTION "CUDA lens Filter" GST_DEBUG_CATEGORY_STATIC (gst_plugin_template_debug); #define GST_CAT_DEFAULT gst_plugin_template_debug typedef struct _GstPlugincudalens GstPlugincudalens; typedef struct _GstPlugincudalensClass GstPlugincudalensClass; #define GST_TYPE_PLUGIN_TEMPLATE \ (gst_plugin_template_get_type()) #define GST_PLUGIN_TEMPLATE(obj) \ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_PLUGIN_TEMPLATE,GstPlugincudalens)) #define GST_PLUGIN_TEMPLATE_CLASS(klass) \ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_PLUGIN_TEMPLATE,GstPlugincudalensClass)) #define GST_IS_PLUGIN_TEMPLATE(obj) \ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_PLUGIN_TEMPLATE)) #define GST_IS_PLUGIN_TEMPLATE_CLASS(klass) \ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_PLUGIN_TEMPLATE)) struct _GstPlugincudalens { GstVideoFilter videofilter; gint width; gint height; gfloat factor; // Barrel distortion compensation matrix hipArray* barrel_idx; hipTextureObject_t barrel_idx_tex; hipArray* afterpoint; hipTextureObject_t afterpoint_tex; }; struct _GstPlugincudalensClass { GstVideoFilterClass parent_class; }; enum { /* FILL ME */ LAST_SIGNAL }; enum { PROP_0, PROP_FACTOR }; /* debug category for fltering log messages */ #define DEBUG_INIT(bla) \ GST_DEBUG_CATEGORY_INIT (gst_plugin_template_debug, PLAGIN_NAME, 0, PLAGIN_SHORT_DESCRIPTION); GST_BOILERPLATE_FULL (GstPlugincudalens, gst_plugin_template, GstVideoFilter, GST_TYPE_VIDEO_FILTER, DEBUG_INIT); static void gst_plugin_template_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_plugin_template_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); static void gst_plugin_template_finalize (GObject * object); static gboolean gst_plugin_template_set_caps (GstBaseTransform * bt, GstCaps * incaps, GstCaps * outcaps); //static GstFlowReturn gst_plugin_template_filter (GstBaseTransform * bt, // GstBuffer * outbuf, GstBuffer * inbuf); static GstFlowReturn gst_plugin_template_filter_inplace (GstBaseTransform * base_transform, GstBuffer * buf); #define ALLOWED_CAPS_STRING \ GST_VIDEO_CAPS_BGRx static GstStaticPadTemplate gst_video_filter_src_template = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS (ALLOWED_CAPS_STRING) ); static GstStaticPadTemplate gst_video_filter_sink_template = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS (ALLOWED_CAPS_STRING) ); /* GObject method implementations */ static void gst_plugin_template_base_init (gpointer klass) { GstElementClass *element_class = GST_ELEMENT_CLASS (klass); GstVideoFilterClass *videofilter_class = GST_VIDEO_FILTER_CLASS (klass); GstCaps *caps; gst_element_class_set_details_simple (element_class, PLAGIN_NAME, "Filter/Effect/Video", "Removes fisheye", "Mikhail Durnev <[email protected]>"); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&gst_video_filter_sink_template)); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&gst_video_filter_src_template)); } 
static void gst_plugin_template_class_init (GstPlugincudalensClass * klass) { GObjectClass *gobject_class; GstBaseTransformClass *btrans_class; GstVideoFilterClass *video_filter_class; gobject_class = (GObjectClass *) klass; btrans_class = (GstBaseTransformClass *) klass; video_filter_class = (GstVideoFilterClass *) klass; gobject_class->set_property = gst_plugin_template_set_property; gobject_class->get_property = gst_plugin_template_get_property; gobject_class->finalize = gst_plugin_template_finalize; g_object_class_install_property (gobject_class, PROP_FACTOR, g_param_spec_float ("factor", "Factor", "Factor = ", 0.0, 0.00001, 0.0000008, (GParamFlags)G_PARAM_READWRITE)); btrans_class->set_caps = gst_plugin_template_set_caps; btrans_class->transform = NULL; btrans_class->transform_ip = gst_plugin_template_filter_inplace; } static void gst_plugin_template_init (GstPlugincudalens * plugin_template, GstPlugincudalensClass * g_class) { GST_DEBUG ("init"); plugin_template->factor = 0.0000008; plugin_template->barrel_idx = NULL; plugin_template->afterpoint = NULL; plugin_template->barrel_idx_tex = 0; plugin_template->afterpoint_tex = 0; } static void gst_plugin_template_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { GstPlugincudalens *filter = GST_PLUGIN_TEMPLATE (object); GST_OBJECT_LOCK (filter); switch (prop_id) { case PROP_FACTOR: filter->factor = g_value_get_float (value); GST_DEBUG("factor = %.8f\n", (double)filter->factor); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } GST_OBJECT_UNLOCK (filter); } static void gst_plugin_template_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { GstPlugincudalens *filter = GST_PLUGIN_TEMPLATE (object); GST_OBJECT_LOCK (filter); switch (prop_id) { case PROP_FACTOR: g_value_set_float (value, filter->factor); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } GST_OBJECT_UNLOCK (filter); } static void gst_plugin_template_finalize (GObject * object) { GstPlugincudalens *filter = GST_PLUGIN_TEMPLATE (object); if (filter->barrel_idx != NULL) { CUDA_CHECK_RETURN(hipFreeArray(filter->barrel_idx)); CUDA_CHECK_RETURN(hipFreeArray(filter->afterpoint)); CUDA_CHECK_RETURN(hipDestroyTextureObject(filter->barrel_idx_tex)); CUDA_CHECK_RETURN(hipDestroyTextureObject(filter->afterpoint_tex)); } //G_OBJECT_CLASS (object)->finalize (object); GST_DEBUG("finalize"); } __global__ void fill_matrix(uint2* barrel_idx, size_t pitch1, float2* afterpoint, size_t pitch2, float factor, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; double k = (double)height / width / 3 * 4; k *= k; // move coordinate center to the center of the frame int x1 = x - (width >> 1); int y1 = y - (height >> 1); // compensate barrel distortion of the lens double d = 1 - ((x1 * x1) * k + y1 * y1) * factor; // calculate new coordinates double fx1 = x1 * d + (width >> 1); double fy1 = y1 * d + (height >> 1); // bilinear interpolation x1 = (int)fx1; y1 = (int)fy1; (*(float2*)((char*)afterpoint + (pitch2 * y + x * sizeof(float2)))).x = (float)(fx1 - x1); (*(float2*)((char*)afterpoint + (pitch2 * y + x * sizeof(float2)))).y = (float)(fy1 - y1); (*(uint2*)((char*)barrel_idx + (pitch1 * y + x * sizeof(uint2)))).x = x1; (*(uint2*)((char*)barrel_idx + (pitch1 * y + x * sizeof(uint2)))).y = y1; } static void calc_matrix(GstPlugincudalens* filter, int stride) { int width = 
filter->width; int height = filter->height; GST_DEBUG("width=%d, height=%d\n", width, height); if (filter->barrel_idx != NULL) { CUDA_CHECK_RETURN(hipFreeArray(filter->barrel_idx)); CUDA_CHECK_RETURN(hipFreeArray(filter->afterpoint)); CUDA_CHECK_RETURN(hipDestroyTextureObject(filter->barrel_idx_tex)); CUDA_CHECK_RETURN(hipDestroyTextureObject(filter->afterpoint_tex)); } hipChannelFormatDesc desci = hipCreateChannelDesc<uint2>(); hipChannelFormatDesc descf = hipCreateChannelDesc<float2>(); CUDA_CHECK_RETURN(hipMallocArray(&filter->barrel_idx, &desci, width, height)); CUDA_CHECK_RETURN(hipMallocArray(&filter->afterpoint, &descf, width, height)); hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = filter->barrel_idx; hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeWrap; texDesc.addressMode[1] = hipAddressModeWrap; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; CUDA_CHECK_RETURN(hipCreateTextureObject(&filter->barrel_idx_tex, &resDesc, &texDesc, NULL)); memset(&resDesc, 0, sizeof(resDesc)); resDesc.res.array.array = filter->afterpoint; CUDA_CHECK_RETURN(hipCreateTextureObject(&filter->afterpoint_tex, &resDesc, &texDesc, NULL)); size_t pitch1, pitch2; void* barrel_idx; void* afterpoint; CUDA_CHECK_RETURN(hipMallocPitch(&barrel_idx, &pitch1, width * sizeof(uint2), height)); CUDA_CHECK_RETURN(hipMallocPitch(&afterpoint, &pitch2, width * sizeof(float2), height)); // fill barrel distortion matrix dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( fill_matrix), dim3(dimGrid), dim3(dimBlock), 0, 0, (uint2*)barrel_idx, pitch1, (float2*)afterpoint, pitch2, filter->factor, width, height); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipMemcpy2DToArray(filter->barrel_idx, 0, 0, barrel_idx, pitch1, width * sizeof(uint2), height, hipMemcpyDeviceToDevice)); CUDA_CHECK_RETURN(hipMemcpy2DToArray(filter->afterpoint, 0, 0, afterpoint, pitch2, width * sizeof(float2), height, hipMemcpyDeviceToDevice)); } static gboolean gst_plugin_template_set_caps (GstBaseTransform * bt, GstCaps * incaps, GstCaps * outcaps) { GstPlugincudalens *plugin_template; GstStructure *structure = NULL; gboolean ret = FALSE; plugin_template = GST_PLUGIN_TEMPLATE (bt); structure = gst_caps_get_structure (incaps, 0); GST_OBJECT_LOCK (plugin_template); if (gst_structure_get_int (structure, "width", &plugin_template->width) && gst_structure_get_int (structure, "height", &plugin_template->height)) { /* Check width and height and modify other plugin_template members accordingly */ /* Calculate distortion compensation matrix */ calc_matrix(plugin_template, plugin_template->width * 4); ret = TRUE; } GST_OBJECT_UNLOCK (plugin_template); return ret; } __global__ void video_filter(hipTextureObject_t in, uchar4* out, size_t pitch, int width, int height, hipTextureObject_t barrel_idx_tex, hipTextureObject_t afterpoint_tex) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; uint2 c = tex2D<uint2>(barrel_idx_tex, x, y); uchar4 v1 = tex2D<uchar4>(in, c.x, c.y); uchar4 v2 = tex2D<uchar4>(in, c.x, c.y + 1); uchar4 v3 = tex2D<uchar4>(in, c.x + 1, c.y); uchar4 v4 = tex2D<uchar4>(in, c.x + 1, c.y + 1); float2 f = tex2D<float2>(afterpoint_tex, x, y); float4 a, b; a.x = v1.x + (v3.x - v1.x) * f.x; 
a.y = v1.y + (v3.y - v1.y) * f.x; a.z = v1.z + (v3.z - v1.z) * f.x; a.w = v1.w + (v3.w - v1.w) * f.x; b.x = v2.x + (v4.x - v2.x) * f.x; b.y = v2.y + (v4.y - v2.y) * f.x; b.z = v2.z + (v4.z - v2.z) * f.x; b.w = v2.w + (v4.w - v2.w) * f.x; uchar4 v; v.x = (unsigned char)(a.x + (b.x - a.x) * f.y); v.y = (unsigned char)(a.y + (b.y - a.y) * f.y); v.z = (unsigned char)(a.z + (b.z - a.z) * f.y); v.w = (unsigned char)(a.w + (b.w - a.w) * f.y); *(uchar4*)((char*)out + (pitch * y + x * sizeof(uchar4))) = v; } static GstFlowReturn gst_plugin_template_filter_inplace (GstBaseTransform * base_transform, GstBuffer * buf) { GstPlugincudalens *plugin_template = GST_PLUGIN_TEMPLATE (base_transform); GstVideoFilter *videofilter = GST_VIDEO_FILTER (base_transform); gint width = plugin_template->width; gint height = plugin_template->height; gint stride = width * 4; unsigned long long *in = (unsigned long long *) GST_BUFFER_DATA (buf); /* * in[0] - device pointer to the allocated memory * in[1] - pitch in bytes * in[2] - texture object * in[3] - device memory allocated for image processing * in[4] - pitch in bytes * in[5] - texture object */ dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( video_filter), dim3(dimGrid), dim3(dimBlock), 0, 0, (hipTextureObject_t)in[2], (uchar4*)in[3], (size_t)in[4], width, height, plugin_template->barrel_idx_tex, plugin_template->afterpoint_tex); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); // Swap buffers int i; for (i = 0; i < 3; i++) { unsigned long long x = in[i]; in[i] = in[i + 3]; in[i + 3] = x; } return GST_FLOW_OK; } static gboolean plugin_init (GstPlugin * plugin) { return gst_element_register (plugin, PLAGIN_NAME, GST_RANK_NONE, GST_TYPE_PLUGIN_TEMPLATE); } /* gstreamer looks for this structure to register plugins */ GST_PLUGIN_DEFINE ( GST_VERSION_MAJOR, GST_VERSION_MINOR, PLAGIN_NAME, PLAGIN_SHORT_DESCRIPTION, plugin_init, VERSION, "LGPL", "GStreamer", "http://gstreamer.net/" ); void test_plugin() { GstPlugincudalens data; data.width = 640; data.height = 480; data.barrel_idx = NULL; data.afterpoint = NULL; data.barrel_idx_tex = 0; data.afterpoint_tex = 0; data.factor = 0.0000008; int stride = data.width * 4; calc_matrix(&data, stride); void *in = NULL; size_t in_pitch; CUDA_CHECK_RETURN(hipMallocPitch(&in, &in_pitch, stride, data.height)); void *out = NULL; size_t out_pitch; CUDA_CHECK_RETURN(hipMallocPitch(&out, &out_pitch, stride, data.height)); hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = in; resDesc.res.pitch2D.desc = hipCreateChannelDesc<uchar4>(); resDesc.res.pitch2D.pitchInBytes = in_pitch; resDesc.res.pitch2D.width = stride; resDesc.res.pitch2D.height = data.height; hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeWrap; texDesc.addressMode[1] = hipAddressModeWrap; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; texDesc.normalizedCoords = 0; hipTextureObject_t tex = 0; CUDA_CHECK_RETURN(hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL)); dim3 dimBlock(16, 16); dim3 dimGrid((data.width + dimBlock.x - 1) / dimBlock.x, (data.height + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( video_filter), dim3(dimGrid), dim3(dimBlock), 0, 0, tex, (uchar4*)out, out_pitch, data.width, data.height, data.barrel_idx_tex, data.afterpoint_tex); 
CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); CUDA_CHECK_RETURN(hipDestroyTextureObject(tex)); CUDA_CHECK_RETURN(hipFree(in)); CUDA_CHECK_RETURN(hipFree(out)); CUDA_CHECK_RETURN(hipFreeArray(data.barrel_idx)); CUDA_CHECK_RETURN(hipFreeArray(data.afterpoint)); CUDA_CHECK_RETURN(hipDestroyTextureObject(data.barrel_idx_tex)); CUDA_CHECK_RETURN(hipDestroyTextureObject(data.afterpoint_tex)); }
8f19ecabc92c12cb9db4a8cf1e04127719c5bfd4.cu
/* * GStreamer * Copyright (C) <1999> Erik Walthinsen <[email protected]> * Copyright (C) <2003> David Schleef <[email protected]> * Copyright (C) <2012> Mikhail Durnev <[email protected]> * Copyright (C) <2014> Mikhail Durnev <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Alternatively, the contents of this file may be used under the * GNU Lesser General Public License Version 2.1 (the "LGPL"), in * which case the following provisions apply instead of the ones * mentioned above: * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /** * SECTION:element-plugin * * FIXME:Describe plugin here. * * <refsect2> * <title>Example launch line</title> * |[ * gst-launch -v -m videotestsrc ! plugin ! 
autovideosink * ]| * </refsect2> */ #ifdef HAVE_CONFIG_H #include "../../common/config.h" #endif #include <gst/gst.h> #include <gst/video/video.h> #include <gst/video/gstvideofilter.h> #include <string.h> #define CUDA_CHECK_RETURN(value) { \ cudaError_t stat = value; \ if (stat != cudaSuccess) { \ GST_DEBUG("Error %s at line %d in file %s\n", \ cudaGetErrorString(stat), __LINE__, __FILE__); \ } } typedef unsigned int uint32_t; #define PLAGIN_NAME "cudalens" #define PLAGIN_SHORT_DESCRIPTION "CUDA lens Filter" GST_DEBUG_CATEGORY_STATIC (gst_plugin_template_debug); #define GST_CAT_DEFAULT gst_plugin_template_debug typedef struct _GstPlugincudalens GstPlugincudalens; typedef struct _GstPlugincudalensClass GstPlugincudalensClass; #define GST_TYPE_PLUGIN_TEMPLATE \ (gst_plugin_template_get_type()) #define GST_PLUGIN_TEMPLATE(obj) \ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_PLUGIN_TEMPLATE,GstPlugincudalens)) #define GST_PLUGIN_TEMPLATE_CLASS(klass) \ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_PLUGIN_TEMPLATE,GstPlugincudalensClass)) #define GST_IS_PLUGIN_TEMPLATE(obj) \ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_PLUGIN_TEMPLATE)) #define GST_IS_PLUGIN_TEMPLATE_CLASS(klass) \ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_PLUGIN_TEMPLATE)) struct _GstPlugincudalens { GstVideoFilter videofilter; gint width; gint height; gfloat factor; // Barrel distortion compensation matrix cudaArray* barrel_idx; cudaTextureObject_t barrel_idx_tex; cudaArray* afterpoint; cudaTextureObject_t afterpoint_tex; }; struct _GstPlugincudalensClass { GstVideoFilterClass parent_class; }; enum { /* FILL ME */ LAST_SIGNAL }; enum { PROP_0, PROP_FACTOR }; /* debug category for fltering log messages */ #define DEBUG_INIT(bla) \ GST_DEBUG_CATEGORY_INIT (gst_plugin_template_debug, PLAGIN_NAME, 0, PLAGIN_SHORT_DESCRIPTION); GST_BOILERPLATE_FULL (GstPlugincudalens, gst_plugin_template, GstVideoFilter, GST_TYPE_VIDEO_FILTER, DEBUG_INIT); static void gst_plugin_template_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_plugin_template_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); static void gst_plugin_template_finalize (GObject * object); static gboolean gst_plugin_template_set_caps (GstBaseTransform * bt, GstCaps * incaps, GstCaps * outcaps); //static GstFlowReturn gst_plugin_template_filter (GstBaseTransform * bt, // GstBuffer * outbuf, GstBuffer * inbuf); static GstFlowReturn gst_plugin_template_filter_inplace (GstBaseTransform * base_transform, GstBuffer * buf); #define ALLOWED_CAPS_STRING \ GST_VIDEO_CAPS_BGRx static GstStaticPadTemplate gst_video_filter_src_template = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS (ALLOWED_CAPS_STRING) ); static GstStaticPadTemplate gst_video_filter_sink_template = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS (ALLOWED_CAPS_STRING) ); /* GObject method implementations */ static void gst_plugin_template_base_init (gpointer klass) { GstElementClass *element_class = GST_ELEMENT_CLASS (klass); GstVideoFilterClass *videofilter_class = GST_VIDEO_FILTER_CLASS (klass); GstCaps *caps; gst_element_class_set_details_simple (element_class, PLAGIN_NAME, "Filter/Effect/Video", "Removes fisheye", "Mikhail Durnev <[email protected]>"); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&gst_video_filter_sink_template)); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get 
(&gst_video_filter_src_template)); } static void gst_plugin_template_class_init (GstPlugincudalensClass * klass) { GObjectClass *gobject_class; GstBaseTransformClass *btrans_class; GstVideoFilterClass *video_filter_class; gobject_class = (GObjectClass *) klass; btrans_class = (GstBaseTransformClass *) klass; video_filter_class = (GstVideoFilterClass *) klass; gobject_class->set_property = gst_plugin_template_set_property; gobject_class->get_property = gst_plugin_template_get_property; gobject_class->finalize = gst_plugin_template_finalize; g_object_class_install_property (gobject_class, PROP_FACTOR, g_param_spec_float ("factor", "Factor", "Factor = ", 0.0, 0.00001, 0.0000008, (GParamFlags)G_PARAM_READWRITE)); btrans_class->set_caps = gst_plugin_template_set_caps; btrans_class->transform = NULL; btrans_class->transform_ip = gst_plugin_template_filter_inplace; } static void gst_plugin_template_init (GstPlugincudalens * plugin_template, GstPlugincudalensClass * g_class) { GST_DEBUG ("init"); plugin_template->factor = 0.0000008; plugin_template->barrel_idx = NULL; plugin_template->afterpoint = NULL; plugin_template->barrel_idx_tex = 0; plugin_template->afterpoint_tex = 0; } static void gst_plugin_template_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { GstPlugincudalens *filter = GST_PLUGIN_TEMPLATE (object); GST_OBJECT_LOCK (filter); switch (prop_id) { case PROP_FACTOR: filter->factor = g_value_get_float (value); GST_DEBUG("factor = %.8f\n", (double)filter->factor); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } GST_OBJECT_UNLOCK (filter); } static void gst_plugin_template_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { GstPlugincudalens *filter = GST_PLUGIN_TEMPLATE (object); GST_OBJECT_LOCK (filter); switch (prop_id) { case PROP_FACTOR: g_value_set_float (value, filter->factor); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } GST_OBJECT_UNLOCK (filter); } static void gst_plugin_template_finalize (GObject * object) { GstPlugincudalens *filter = GST_PLUGIN_TEMPLATE (object); if (filter->barrel_idx != NULL) { CUDA_CHECK_RETURN(cudaFreeArray(filter->barrel_idx)); CUDA_CHECK_RETURN(cudaFreeArray(filter->afterpoint)); CUDA_CHECK_RETURN(cudaDestroyTextureObject(filter->barrel_idx_tex)); CUDA_CHECK_RETURN(cudaDestroyTextureObject(filter->afterpoint_tex)); } //G_OBJECT_CLASS (object)->finalize (object); GST_DEBUG("finalize"); } __global__ void fill_matrix(uint2* barrel_idx, size_t pitch1, float2* afterpoint, size_t pitch2, float factor, int width, int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; double k = (double)height / width / 3 * 4; k *= k; // move coordinate center to the center of the frame int x1 = x - (width >> 1); int y1 = y - (height >> 1); // compensate barrel distortion of the lens double d = 1 - ((x1 * x1) * k + y1 * y1) * factor; // calculate new coordinates double fx1 = x1 * d + (width >> 1); double fy1 = y1 * d + (height >> 1); // bilinear interpolation x1 = (int)fx1; y1 = (int)fy1; (*(float2*)((char*)afterpoint + (pitch2 * y + x * sizeof(float2)))).x = (float)(fx1 - x1); (*(float2*)((char*)afterpoint + (pitch2 * y + x * sizeof(float2)))).y = (float)(fy1 - y1); (*(uint2*)((char*)barrel_idx + (pitch1 * y + x * sizeof(uint2)))).x = x1; (*(uint2*)((char*)barrel_idx + (pitch1 * y + x * sizeof(uint2)))).y = y1; } static void calc_matrix(GstPlugincudalens* 
filter, int stride) { int width = filter->width; int height = filter->height; GST_DEBUG("width=%d, height=%d\n", width, height); if (filter->barrel_idx != NULL) { CUDA_CHECK_RETURN(cudaFreeArray(filter->barrel_idx)); CUDA_CHECK_RETURN(cudaFreeArray(filter->afterpoint)); CUDA_CHECK_RETURN(cudaDestroyTextureObject(filter->barrel_idx_tex)); CUDA_CHECK_RETURN(cudaDestroyTextureObject(filter->afterpoint_tex)); } cudaChannelFormatDesc desci = cudaCreateChannelDesc<uint2>(); cudaChannelFormatDesc descf = cudaCreateChannelDesc<float2>(); CUDA_CHECK_RETURN(cudaMallocArray(&filter->barrel_idx, &desci, width, height)); CUDA_CHECK_RETURN(cudaMallocArray(&filter->afterpoint, &descf, width, height)); cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = filter->barrel_idx; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeWrap; texDesc.addressMode[1] = cudaAddressModeWrap; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; CUDA_CHECK_RETURN(cudaCreateTextureObject(&filter->barrel_idx_tex, &resDesc, &texDesc, NULL)); memset(&resDesc, 0, sizeof(resDesc)); resDesc.res.array.array = filter->afterpoint; CUDA_CHECK_RETURN(cudaCreateTextureObject(&filter->afterpoint_tex, &resDesc, &texDesc, NULL)); size_t pitch1, pitch2; void* barrel_idx; void* afterpoint; CUDA_CHECK_RETURN(cudaMallocPitch(&barrel_idx, &pitch1, width * sizeof(uint2), height)); CUDA_CHECK_RETURN(cudaMallocPitch(&afterpoint, &pitch2, width * sizeof(float2), height)); // fill barrel distortion matrix dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); fill_matrix<<<dimGrid, dimBlock>>>((uint2*)barrel_idx, pitch1, (float2*)afterpoint, pitch2, filter->factor, width, height); CUDA_CHECK_RETURN(cudaThreadSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); CUDA_CHECK_RETURN(cudaMemcpy2DToArray(filter->barrel_idx, 0, 0, barrel_idx, pitch1, width * sizeof(uint2), height, cudaMemcpyDeviceToDevice)); CUDA_CHECK_RETURN(cudaMemcpy2DToArray(filter->afterpoint, 0, 0, afterpoint, pitch2, width * sizeof(float2), height, cudaMemcpyDeviceToDevice)); } static gboolean gst_plugin_template_set_caps (GstBaseTransform * bt, GstCaps * incaps, GstCaps * outcaps) { GstPlugincudalens *plugin_template; GstStructure *structure = NULL; gboolean ret = FALSE; plugin_template = GST_PLUGIN_TEMPLATE (bt); structure = gst_caps_get_structure (incaps, 0); GST_OBJECT_LOCK (plugin_template); if (gst_structure_get_int (structure, "width", &plugin_template->width) && gst_structure_get_int (structure, "height", &plugin_template->height)) { /* Check width and height and modify other plugin_template members accordingly */ /* Calculate distortion compensation matrix */ calc_matrix(plugin_template, plugin_template->width * 4); ret = TRUE; } GST_OBJECT_UNLOCK (plugin_template); return ret; } __global__ void video_filter(cudaTextureObject_t in, uchar4* out, size_t pitch, int width, int height, cudaTextureObject_t barrel_idx_tex, cudaTextureObject_t afterpoint_tex) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; uint2 c = tex2D<uint2>(barrel_idx_tex, x, y); uchar4 v1 = tex2D<uchar4>(in, c.x, c.y); uchar4 v2 = tex2D<uchar4>(in, c.x, c.y + 1); uchar4 v3 = tex2D<uchar4>(in, c.x + 1, c.y); uchar4 v4 = tex2D<uchar4>(in, c.x + 1, c.y + 1); float2 f = tex2D<float2>(afterpoint_tex, x, y); float4 a, b; a.x = 
v1.x + (v3.x - v1.x) * f.x; a.y = v1.y + (v3.y - v1.y) * f.x; a.z = v1.z + (v3.z - v1.z) * f.x; a.w = v1.w + (v3.w - v1.w) * f.x; b.x = v2.x + (v4.x - v2.x) * f.x; b.y = v2.y + (v4.y - v2.y) * f.x; b.z = v2.z + (v4.z - v2.z) * f.x; b.w = v2.w + (v4.w - v2.w) * f.x; uchar4 v; v.x = (unsigned char)(a.x + (b.x - a.x) * f.y); v.y = (unsigned char)(a.y + (b.y - a.y) * f.y); v.z = (unsigned char)(a.z + (b.z - a.z) * f.y); v.w = (unsigned char)(a.w + (b.w - a.w) * f.y); *(uchar4*)((char*)out + (pitch * y + x * sizeof(uchar4))) = v; } static GstFlowReturn gst_plugin_template_filter_inplace (GstBaseTransform * base_transform, GstBuffer * buf) { GstPlugincudalens *plugin_template = GST_PLUGIN_TEMPLATE (base_transform); GstVideoFilter *videofilter = GST_VIDEO_FILTER (base_transform); gint width = plugin_template->width; gint height = plugin_template->height; gint stride = width * 4; unsigned long long *in = (unsigned long long *) GST_BUFFER_DATA (buf); /* * in[0] - device pointer to the allocated memory * in[1] - pitch in bytes * in[2] - texture object * in[3] - device memory allocated for image processing * in[4] - pitch in bytes * in[5] - texture object */ dim3 dimBlock(16, 16); dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); video_filter<<<dimGrid, dimBlock>>>((cudaTextureObject_t)in[2], (uchar4*)in[3], (size_t)in[4], width, height, plugin_template->barrel_idx_tex, plugin_template->afterpoint_tex); CUDA_CHECK_RETURN(cudaThreadSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); // Swap buffers int i; for (i = 0; i < 3; i++) { unsigned long long x = in[i]; in[i] = in[i + 3]; in[i + 3] = x; } return GST_FLOW_OK; } static gboolean plugin_init (GstPlugin * plugin) { return gst_element_register (plugin, PLAGIN_NAME, GST_RANK_NONE, GST_TYPE_PLUGIN_TEMPLATE); } /* gstreamer looks for this structure to register plugins */ GST_PLUGIN_DEFINE ( GST_VERSION_MAJOR, GST_VERSION_MINOR, PLAGIN_NAME, PLAGIN_SHORT_DESCRIPTION, plugin_init, VERSION, "LGPL", "GStreamer", "http://gstreamer.net/" ); void test_plugin() { GstPlugincudalens data; data.width = 640; data.height = 480; data.barrel_idx = NULL; data.afterpoint = NULL; data.barrel_idx_tex = 0; data.afterpoint_tex = 0; data.factor = 0.0000008; int stride = data.width * 4; calc_matrix(&data, stride); void *in = NULL; size_t in_pitch; CUDA_CHECK_RETURN(cudaMallocPitch(&in, &in_pitch, stride, data.height)); void *out = NULL; size_t out_pitch; CUDA_CHECK_RETURN(cudaMallocPitch(&out, &out_pitch, stride, data.height)); cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = in; resDesc.res.pitch2D.desc = cudaCreateChannelDesc<uchar4>(); resDesc.res.pitch2D.pitchInBytes = in_pitch; resDesc.res.pitch2D.width = stride; resDesc.res.pitch2D.height = data.height; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeWrap; texDesc.addressMode[1] = cudaAddressModeWrap; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; texDesc.normalizedCoords = 0; cudaTextureObject_t tex = 0; CUDA_CHECK_RETURN(cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL)); dim3 dimBlock(16, 16); dim3 dimGrid((data.width + dimBlock.x - 1) / dimBlock.x, (data.height + dimBlock.y - 1) / dimBlock.y); video_filter<<<dimGrid, dimBlock>>>(tex, (uchar4*)out, out_pitch, data.width, data.height, data.barrel_idx_tex, data.afterpoint_tex); CUDA_CHECK_RETURN(cudaThreadSynchronize()); 
    CUDA_CHECK_RETURN(cudaGetLastError());

    CUDA_CHECK_RETURN(cudaDestroyTextureObject(tex));
    CUDA_CHECK_RETURN(cudaFree(in));
    CUDA_CHECK_RETURN(cudaFree(out));

    CUDA_CHECK_RETURN(cudaFreeArray(data.barrel_idx));
    CUDA_CHECK_RETURN(cudaFreeArray(data.afterpoint));
    CUDA_CHECK_RETURN(cudaDestroyTextureObject(data.barrel_idx_tex));
    CUDA_CHECK_RETURN(cudaDestroyTextureObject(data.afterpoint_tex));
}
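The fill_matrix/video_filter pair above splits each distorted source coordinate into an integer base index (the uint2 texture) and a fractional remainder (the float2 texture), and the kernel then blends the four neighbouring RGBA pixels. A minimal CPU sketch of that bilinear blend follows for reference; the Px struct and function name are illustrative only and are not part of the plugin.

// CPU reference for the bilinear fetch performed in video_filter (illustrative only).
struct Px { unsigned char x, y, z, w; };

static Px bilinear_cpu(const Px* img, int width, int cx, int cy, float fx, float fy) {
    // Same four taps as the kernel: v1=(cx,cy), v2=(cx,cy+1), v3=(cx+1,cy), v4=(cx+1,cy+1)
    const Px v1 = img[cy * width + cx];
    const Px v2 = img[(cy + 1) * width + cx];
    const Px v3 = img[cy * width + cx + 1];
    const Px v4 = img[(cy + 1) * width + cx + 1];
    auto lerp = [](float a, float b, float t) { return a + (b - a) * t; };
    Px out;
    out.x = (unsigned char)lerp(lerp(v1.x, v3.x, fx), lerp(v2.x, v4.x, fx), fy);
    out.y = (unsigned char)lerp(lerp(v1.y, v3.y, fx), lerp(v2.y, v4.y, fx), fy);
    out.z = (unsigned char)lerp(lerp(v1.z, v3.z, fx), lerp(v2.z, v4.z, fx), fy);
    out.w = (unsigned char)lerp(lerp(v1.w, v3.w, fx), lerp(v2.w, v4.w, fx), fy);
    // Callers must keep cx+1 and cy+1 in range; the kernel relies on texture wrap addressing instead.
    return out;
}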
c925b1acbc6bd553a366613a0ba2d5e53159558b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void SweHInit(double *var_in1, double *var_in2, double *var_out, int size) {
    // Global thread id
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    // Grid-stride loop over all elements
    while (tid < size) {
        // Element-wise difference: out = in1 - in2
        var_out[tid] = var_in1[tid] - var_in2[tid];

        // Advance to the next element handled by this thread
        tid += blockDim.x * gridDim.x;
    }
}
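The file above defines only the kernel. A minimal, hypothetical HIP host driver is sketched below (no error checking); it assumes the double arrays live on the host and that the grid-stride loop lets any launch geometry cover `size` elements. The function and parameter names are illustrative and not taken from the project.

// Hypothetical host-side driver for SweHInit (not part of the original file).
void swe_h_init_host(const double* in1, const double* in2, double* out, int size) {
    double *d_in1, *d_in2, *d_out;
    hipMalloc(&d_in1, size * sizeof(double));
    hipMalloc(&d_in2, size * sizeof(double));
    hipMalloc(&d_out, size * sizeof(double));
    hipMemcpy(d_in1, in1, size * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_in2, in2, size * sizeof(double), hipMemcpyHostToDevice);

    const int threads = 256;
    const int blocks  = (size + threads - 1) / threads;  // grid-stride loop tolerates any grid size
    hipLaunchKernelGGL(SweHInit, dim3(blocks), dim3(threads), 0, 0, d_in1, d_in2, d_out, size);

    hipMemcpy(out, d_out, size * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_in1); hipFree(d_in2); hipFree(d_out);
}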
c925b1acbc6bd553a366613a0ba2d5e53159558b.cu
#include "includes.h" __global__ void SweHInit(double *var_in1, double *var_in2, double *var_out, int size) { // Get thread id int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < size) { // Transfer data and memory and calculation var_out[tid] = var_in1[tid] - var_in2[tid]; // Thread id update tid += blockDim.x * gridDim.x; } }
e681c511208f067962380a70e54c5bee62fd53b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* File: cuda_kmeans.cu (CUDA version) */ /* Description: Implementation of simple k-means clustering algorithm */ /* This program takes an array of N data objects, each with */ /* M coordinates and performs a k-means clustering given a */ /* user-provided value of the number of clusters (K). The */ /* clustering results are saved in 2 arrays: */ /* 1. a returned array of size [K][N] indicating the center */ /* coordinates of K clusters */ /* 2. membership[N] stores the cluster center ids, each */ /* corresponding to the cluster a data object is assigned */ /* */ /* Author: Wei-keng Liao */ /* ECE Department, Northwestern University */ /* email: [email protected] */ /* Copyright, 2005, Wei-keng Liao */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // Copyright (c) 2005 Wei-keng Liao // Copyright (c) 2011 Serban Giuroiu // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // ----------------------------------------------------------------------------- #define BLOCKSIZE2 1024 //#define OUTPUT_SIZE //#define OUTPUT_TIME #define NUMBER 8 #define EXTRA 1 //#define OUTPUT_RESULT #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "kmeans.h" /* static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; }*/ /*----< find_nearest_cluster() >---------------------------------------------*/ __global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] float *deviceNewCluster,// [numCoords][numClusters] int *deviceNewClusterSize, //[numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; unsigned int tid = threadIdx.x; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. // unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory );//+ blockDim.x); // membershipChanged[tid] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! 
// using CUDA unroll #pragma unroll for (int i = tid; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[(numClusters+1) * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + tid; if (objectId < numObjs) { int index; float dist, min_dist=1e20; /* find the cluster id that has min distance to object */ index = 0; for (int i=0;i<numClusters;i++){ dist = 0; #pragma unroll for (int j = 0; j < numCoords; j++) { float x = objects[numObjs * j + objectId]; float y = clusters[(numClusters+1) * j + i]; dist += (x-y)*(x-y); } if (dist<min_dist){ min_dist = dist; index = i; } } //use atomic add to calculate new clusters. Phase 1 : sum up #pragma unroll for (int j=0; j<numCoords; j++) atomicAdd(&deviceNewCluster[j*numClusters + index], objects[j*numObjs+objectId]); // assign the membership to object objectId if (membership[objectId] != index) atomicAdd(&intermediates[0],1); membership[objectId] = index; //use atomic add to calculate new clusters. Phase 2 : sum up cluster size atomicAdd(&deviceNewClusterSize[index],1); } } // special treatement for nCoords = 22 __global__ static void find_nearest_cluster_666(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] float *deviceNewCluster,// [numCoords][numClusters] int *deviceNewClusterSize, //[numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; unsigned int tid = threadIdx.x; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. // unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory );//+ blockDim.x); // membershipChanged[tid] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! // using CUDA unroll #pragma unroll for (int i = tid; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[(numClusters+1) * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + tid; if (objectId < numObjs) { int index; float dist, min_dist=1e20; /* find the cluster id that has min distance to object */ index = 0; for (int i=0;i<numClusters;i++){ dist = 0; #pragma unroll for (int j = 0; j < 22; j++) { float x = objects[numObjs * j + objectId]; float y = clusters[(numClusters+1) * j + i]; dist += (x-y)*(x-y); } if (dist<min_dist){ min_dist = dist; index = i; } } //use atomic add to calculate new clusters. Phase 1 : sum up #pragma unroll for (int j=0; j<numCoords; j++) atomicAdd(&deviceNewCluster[j*numClusters + index], objects[j*numObjs+objectId]); // assign the membership to object objectId if (membership[objectId] != index) atomicAdd(&intermediates[0],1); membership[objectId] = index; atomicAdd(&deviceNewClusterSize[index],1); } } // special treatement for nCoords = 8 __global__ static void find_nearest_cluster_2333(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] float *deviceNewCluster,// [numCoords][numClusters] int *deviceNewClusterSize, //[numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; unsigned int tid = threadIdx.x; // The type chosen for membershipChanged must be large enough to support // reductions! 
There are blockDim.x elements, one for each thread in the // block. // unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory );//+ blockDim.x); // membershipChanged[tid] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! // using CUDA unroll #pragma unroll for (int i = tid; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[(numClusters+1) * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + tid; if (objectId < numObjs) { int index; float dist, min_dist=1e20; /* find the cluster id that has min distance to object */ index = 0; for (int i=0;i<numClusters;i++){ dist = 0; #pragma unroll for (int j = 0; j < 8; j++) { float x = objects[numObjs * j + objectId]; float y = clusters[(numClusters+1) * j + i]; dist += (x-y)*(x-y); } if (dist<min_dist){ min_dist = dist; index = i; } } //use atomic add to calculate new clusters. Phase 1 : sum up #pragma unroll for (int j=0; j<numCoords; j++) atomicAdd(&deviceNewCluster[j*numClusters + index], objects[j*numObjs+objectId]); // assign the membership to object objectId if (membership[objectId] != index) atomicAdd(&intermediates[0],1); membership[objectId] = index; atomicAdd(&deviceNewClusterSize[index],1); } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numCoords] // clusters [numClusters][numCoords] // dimObjects [numCoords][numObjs] // dimClusters [numCoords][numClusters] // newClusters [numCoords][numClusters] // deviceObjects [numCoords][numObjs] // deviceClusters [numCoords][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numCoords] */ float** cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop=0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **dimObjects; float **clusters; /* out: [numClusters][numCoords] */ float **dimClusters; float **newClusters; /* [numCoords][numClusters] */ float *deviceObjects; float *deviceClusters; float *deviceNewCluster; //new int *deviceNewClusterSize; //new int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numCoords] layout to new // [numCoords][numObjs] layout malloc2D(dimObjects, numCoords, numObjs, float); #pragma omp parallel for for (i = 0; i < numCoords; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } } /* pick first numClusters elements of objects[] as initial cluster centers*/ malloc2D(dimClusters, numCoords, numClusters, float); #pragma omp parallel for for (i = 0; i < numCoords; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } } /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*) calloc(EXTRA*numClusters, sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numCoords, numClusters, float); memset(newClusters[0], 0, numCoords * numClusters * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of // two, and it *must* be no larger than the number of bits that will // fit into an unsigned char, the type used to keep track of membership // changes in the kernel. const unsigned int numThreadsPerClusterBlock = 128; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; const unsigned int clusterBlockSharedDataSize = // numThreadsPerClusterBlock * sizeof(unsigned char) + (numClusters+1) * numCoords * sizeof(float); const unsigned int numReductionThreads = 1; // nextPowerOfTwo(numClusterBlocks); // const unsigned int reductionBlockSharedDataSize = // numReductionThreads * sizeof(unsigned int); checkCuda(hipMalloc(&deviceObjects, numObjs*numCoords*sizeof(float))); checkCuda(hipMalloc(&deviceClusters, numClusters*numCoords*sizeof(float))); checkCuda(hipMalloc(&deviceMembership, numObjs*sizeof(int))); checkCuda(hipMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int))); checkCuda(hipMemset(deviceIntermediates, 0,numReductionThreads*sizeof(unsigned int))); checkCuda(hipMemcpy(deviceObjects, dimObjects[0], numObjs*numCoords*sizeof(float), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(deviceMembership, membership, numObjs*sizeof(int), hipMemcpyHostToDevice)); checkCuda(hipMalloc(&deviceNewCluster, numClusters*numCoords*sizeof(float))); checkCuda(hipMalloc(&deviceNewClusterSize, EXTRA*numClusters * sizeof(int) )); checkCuda(hipMemset(deviceNewCluster, 0, numClusters*numCoords*sizeof(float))); checkCuda(hipMemset(deviceNewClusterSize, 0, EXTRA*numClusters * sizeof(int))); //out of the loop! 
checkCuda(hipMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), hipMemcpyHostToDevice)); do { #ifdef OUTPUT_TIME struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); #endif #ifdef OUTPUT_SIZE printf("\nnumClusterBlocks = %d, numThreadPerCB = %d\n",numClusterBlocks,numThreadsPerClusterBlock); #endif if (numCoords == 8) hipLaunchKernelGGL(( find_nearest_cluster_2333) , dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), clusterBlockSharedDataSize, 0, numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceNewCluster, deviceNewClusterSize, deviceMembership, deviceIntermediates); else if (numCoords == 22) hipLaunchKernelGGL(( find_nearest_cluster_666) , dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), clusterBlockSharedDataSize, 0, numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceNewCluster, deviceNewClusterSize, deviceMembership, deviceIntermediates); else hipLaunchKernelGGL(( find_nearest_cluster) , dim3(numClusterBlocks), dim3(numThreadsPerClusterBlock), clusterBlockSharedDataSize , 0, numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceNewCluster, deviceNewClusterSize, deviceMembership, deviceIntermediates); hipDeviceSynchronize(); checkLastCudaError(); #ifdef OUTPUT_TIME gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); printf("#%d %ld.%06ld\t",loop, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); gettimeofday(&tval_before, NULL); #endif // Delta reduction int d; checkCuda(hipMemcpy(&d, deviceIntermediates, sizeof(int), hipMemcpyDeviceToHost)); delta = (float)d; checkCuda(hipMemset(deviceIntermediates,0, numReductionThreads*sizeof(unsigned int))); #ifdef OUTPUT_TIME gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); printf(" %ld.%06ld\t", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); gettimeofday(&tval_before, NULL); #endif // calculate new clusters, phase 3 checkCuda(hipMemcpy(newClusters[0], deviceNewCluster, numClusters*numCoords*sizeof(float), hipMemcpyDeviceToHost)); checkCuda(hipMemcpy(newClusterSize,deviceNewClusterSize, EXTRA*numClusters * sizeof(int), hipMemcpyDeviceToHost )); #ifdef OUTPUT_RESULT printf("Membership:\n"); #endif for (i=0; i<numClusters; i++) { #ifdef OUTPUT_RESULT printf("%d ",newClusterSize[i]); #endif for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; } } #ifdef OUTPUT_RESULT printf("\nClusters:\n"); for (i=0;i<numClusters;i++){ for (j=0;j<numCoords;j++) printf("%f ",dimClusters[j][i]); printf("\n"); } #endif checkCuda(hipMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), hipMemcpyHostToDevice)); checkCuda(hipMemset(deviceNewCluster, 0, numClusters*numCoords*sizeof(float))); checkCuda(hipMemset(deviceNewClusterSize, 0, EXTRA*numClusters * sizeof(int))); #ifdef OUTPUT_TIME gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); printf(", %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); #endif delta /= numObjs; } while (delta > threshold && loop++ < 500); //====================================================== // calculate again to handle float-point errors checkCuda(hipMemcpy(membership, deviceMembership, numObjs*sizeof(int), hipMemcpyDeviceToHost)); for (i=0; i<numObjs; i++) { index = membership[i]; newClusterSize[index]++; for (j=0; j<numCoords; j++){ newClusters[j][index] += objects[i][j]; } } 
#ifdef OUTPUT_RESULT printf("Membership:\n"); #endif for (i=0; i<numClusters; i++) { #ifdef OUTPUT_RESULT printf("%d ",newClusterSize[i]); #endif for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; newClusters[j][i] = 0.0; } newClusterSize[i] = 0; } #ifdef OUTPUT_RESULT printf("\nClusters:\n"); for (i=0;i<numClusters;i++){ for (j=0;j<numCoords;j++) printf("%f ",dimClusters[j][i]); printf("\n"); } #endif checkCuda(hipMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), hipMemcpyHostToDevice)); //===================================================== *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, numCoords, float); //GPU -> mem checkCuda(hipMemcpy(dimClusters[0], deviceClusters, numClusters*numCoords*sizeof(float), hipMemcpyDeviceToHost)); for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { clusters[i][j] = dimClusters[j][i]; //printf("%f ",clusters[i][j]); } // printf("\n"); } checkCuda(hipFree(deviceObjects)); checkCuda(hipFree(deviceClusters)); checkCuda(hipFree(deviceMembership)); checkCuda(hipFree(deviceIntermediates)); checkCuda(hipFree(deviceNewCluster)); checkCuda(hipFree(deviceNewClusterSize)); free(dimObjects[0]); free(dimObjects); free(dimClusters[0]); free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
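The kernels above stage the whole cluster table in dynamic shared memory, (numClusters+1) * numCoords floats, and the source itself warns that this can overrun the per-block limit. A hedged pre-launch check is sketched below; the helper name is illustrative and not part of the original file.

// Pre-launch sanity check for the shared-memory footprint used by the
// find_nearest_cluster kernels (illustrative only).
bool shared_mem_fits(int numClusters, int numCoords) {
    int device = 0;
    hipGetDevice(&device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    const size_t needed = (size_t)(numClusters + 1) * numCoords * sizeof(float);
    return needed <= prop.sharedMemPerBlock;
}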
e681c511208f067962380a70e54c5bee62fd53b6.cu
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* File: cuda_kmeans.cu (CUDA version) */ /* Description: Implementation of simple k-means clustering algorithm */ /* This program takes an array of N data objects, each with */ /* M coordinates and performs a k-means clustering given a */ /* user-provided value of the number of clusters (K). The */ /* clustering results are saved in 2 arrays: */ /* 1. a returned array of size [K][N] indicating the center */ /* coordinates of K clusters */ /* 2. membership[N] stores the cluster center ids, each */ /* corresponding to the cluster a data object is assigned */ /* */ /* Author: Wei-keng Liao */ /* ECE Department, Northwestern University */ /* email: [email protected] */ /* Copyright, 2005, Wei-keng Liao */ /* */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // Copyright (c) 2005 Wei-keng Liao // Copyright (c) 2011 Serban Giuroiu // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // ----------------------------------------------------------------------------- #define BLOCKSIZE2 1024 //#define OUTPUT_SIZE //#define OUTPUT_TIME #define NUMBER 8 #define EXTRA 1 //#define OUTPUT_RESULT #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "kmeans.h" /* static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; }*/ /*----< find_nearest_cluster() >---------------------------------------------*/ __global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] float *deviceNewCluster,// [numCoords][numClusters] int *deviceNewClusterSize, //[numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; unsigned int tid = threadIdx.x; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. // unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory );//+ blockDim.x); // membershipChanged[tid] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! 
// using CUDA unroll #pragma unroll for (int i = tid; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[(numClusters+1) * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + tid; if (objectId < numObjs) { int index; float dist, min_dist=1e20; /* find the cluster id that has min distance to object */ index = 0; for (int i=0;i<numClusters;i++){ dist = 0; #pragma unroll for (int j = 0; j < numCoords; j++) { float x = objects[numObjs * j + objectId]; float y = clusters[(numClusters+1) * j + i]; dist += (x-y)*(x-y); } if (dist<min_dist){ min_dist = dist; index = i; } } //use atomic add to calculate new clusters. Phase 1 : sum up #pragma unroll for (int j=0; j<numCoords; j++) atomicAdd(&deviceNewCluster[j*numClusters + index], objects[j*numObjs+objectId]); // assign the membership to object objectId if (membership[objectId] != index) atomicAdd(&intermediates[0],1); membership[objectId] = index; //use atomic add to calculate new clusters. Phase 2 : sum up cluster size atomicAdd(&deviceNewClusterSize[index],1); } } // special treatement for nCoords = 22 __global__ static void find_nearest_cluster_666(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] float *deviceNewCluster,// [numCoords][numClusters] int *deviceNewClusterSize, //[numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; unsigned int tid = threadIdx.x; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. // unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory );//+ blockDim.x); // membershipChanged[tid] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! // using CUDA unroll #pragma unroll for (int i = tid; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[(numClusters+1) * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + tid; if (objectId < numObjs) { int index; float dist, min_dist=1e20; /* find the cluster id that has min distance to object */ index = 0; for (int i=0;i<numClusters;i++){ dist = 0; #pragma unroll for (int j = 0; j < 22; j++) { float x = objects[numObjs * j + objectId]; float y = clusters[(numClusters+1) * j + i]; dist += (x-y)*(x-y); } if (dist<min_dist){ min_dist = dist; index = i; } } //use atomic add to calculate new clusters. Phase 1 : sum up #pragma unroll for (int j=0; j<numCoords; j++) atomicAdd(&deviceNewCluster[j*numClusters + index], objects[j*numObjs+objectId]); // assign the membership to object objectId if (membership[objectId] != index) atomicAdd(&intermediates[0],1); membership[objectId] = index; atomicAdd(&deviceNewClusterSize[index],1); } } // special treatement for nCoords = 8 __global__ static void find_nearest_cluster_2333(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] float *deviceNewCluster,// [numCoords][numClusters] int *deviceNewClusterSize, //[numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; unsigned int tid = threadIdx.x; // The type chosen for membershipChanged must be large enough to support // reductions! 
There are blockDim.x elements, one for each thread in the // block. // unsigned char *membershipChanged = (unsigned char *)sharedMemory; float *clusters = (float *)(sharedMemory );//+ blockDim.x); // membershipChanged[tid] = 0; // BEWARE: We can overrun our shared memory here if there are too many // clusters or too many coordinates! // using CUDA unroll #pragma unroll for (int i = tid; i < numClusters; i += blockDim.x) { for (int j = 0; j < numCoords; j++) { clusters[(numClusters+1) * j + i] = deviceClusters[numClusters * j + i]; } } __syncthreads(); int objectId = blockDim.x * blockIdx.x + tid; if (objectId < numObjs) { int index; float dist, min_dist=1e20; /* find the cluster id that has min distance to object */ index = 0; for (int i=0;i<numClusters;i++){ dist = 0; #pragma unroll for (int j = 0; j < 8; j++) { float x = objects[numObjs * j + objectId]; float y = clusters[(numClusters+1) * j + i]; dist += (x-y)*(x-y); } if (dist<min_dist){ min_dist = dist; index = i; } } //use atomic add to calculate new clusters. Phase 1 : sum up #pragma unroll for (int j=0; j<numCoords; j++) atomicAdd(&deviceNewCluster[j*numClusters + index], objects[j*numObjs+objectId]); // assign the membership to object objectId if (membership[objectId] != index) atomicAdd(&intermediates[0],1); membership[objectId] = index; atomicAdd(&deviceNewClusterSize[index],1); } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numCoords] // clusters [numClusters][numCoords] // dimObjects [numCoords][numObjs] // dimClusters [numCoords][numClusters] // newClusters [numCoords][numClusters] // deviceObjects [numCoords][numObjs] // deviceClusters [numCoords][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numCoords] */ float** cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop=0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **dimObjects; float **clusters; /* out: [numClusters][numCoords] */ float **dimClusters; float **newClusters; /* [numCoords][numClusters] */ float *deviceObjects; float *deviceClusters; float *deviceNewCluster; //new int *deviceNewClusterSize; //new int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numCoords] layout to new // [numCoords][numObjs] layout malloc2D(dimObjects, numCoords, numObjs, float); #pragma omp parallel for for (i = 0; i < numCoords; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } } /* pick first numClusters elements of objects[] as initial cluster centers*/ malloc2D(dimClusters, numCoords, numClusters, float); #pragma omp parallel for for (i = 0; i < numCoords; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } } /* initialize membership[] */ for (i=0; i<numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int*) calloc(EXTRA*numClusters, sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numCoords, numClusters, float); memset(newClusters[0], 0, numCoords * numClusters * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of // two, and it *must* be no larger than the number of bits that will // fit into an unsigned char, the type used to keep track of membership // changes in the kernel. const unsigned int numThreadsPerClusterBlock = 128; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; const unsigned int clusterBlockSharedDataSize = // numThreadsPerClusterBlock * sizeof(unsigned char) + (numClusters+1) * numCoords * sizeof(float); const unsigned int numReductionThreads = 1; // nextPowerOfTwo(numClusterBlocks); // const unsigned int reductionBlockSharedDataSize = // numReductionThreads * sizeof(unsigned int); checkCuda(cudaMalloc(&deviceObjects, numObjs*numCoords*sizeof(float))); checkCuda(cudaMalloc(&deviceClusters, numClusters*numCoords*sizeof(float))); checkCuda(cudaMalloc(&deviceMembership, numObjs*sizeof(int))); checkCuda(cudaMalloc(&deviceIntermediates, numReductionThreads*sizeof(unsigned int))); checkCuda(cudaMemset(deviceIntermediates, 0,numReductionThreads*sizeof(unsigned int))); checkCuda(cudaMemcpy(deviceObjects, dimObjects[0], numObjs*numCoords*sizeof(float), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(deviceMembership, membership, numObjs*sizeof(int), cudaMemcpyHostToDevice)); checkCuda(cudaMalloc(&deviceNewCluster, numClusters*numCoords*sizeof(float))); checkCuda(cudaMalloc(&deviceNewClusterSize, EXTRA*numClusters * sizeof(int) )); checkCuda(cudaMemset(deviceNewCluster, 0, numClusters*numCoords*sizeof(float))); checkCuda(cudaMemset(deviceNewClusterSize, 0, EXTRA*numClusters * sizeof(int))); //out of the loop! 
checkCuda(cudaMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), cudaMemcpyHostToDevice)); do { #ifdef OUTPUT_TIME struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); #endif #ifdef OUTPUT_SIZE printf("\nnumClusterBlocks = %d, numThreadPerCB = %d\n",numClusterBlocks,numThreadsPerClusterBlock); #endif if (numCoords == 8) find_nearest_cluster_2333 <<< numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize>>> (numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceNewCluster, deviceNewClusterSize, deviceMembership, deviceIntermediates); else if (numCoords == 22) find_nearest_cluster_666 <<< numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize>>> (numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceNewCluster, deviceNewClusterSize, deviceMembership, deviceIntermediates); else find_nearest_cluster <<< numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize >>> (numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceNewCluster, deviceNewClusterSize, deviceMembership, deviceIntermediates); cudaThreadSynchronize(); checkLastCudaError(); #ifdef OUTPUT_TIME gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); printf("#%d %ld.%06ld\t",loop, (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); gettimeofday(&tval_before, NULL); #endif // Delta reduction int d; checkCuda(cudaMemcpy(&d, deviceIntermediates, sizeof(int), cudaMemcpyDeviceToHost)); delta = (float)d; checkCuda(cudaMemset(deviceIntermediates,0, numReductionThreads*sizeof(unsigned int))); #ifdef OUTPUT_TIME gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); printf(" %ld.%06ld\t", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); gettimeofday(&tval_before, NULL); #endif // calculate new clusters, phase 3 checkCuda(cudaMemcpy(newClusters[0], deviceNewCluster, numClusters*numCoords*sizeof(float), cudaMemcpyDeviceToHost)); checkCuda(cudaMemcpy(newClusterSize,deviceNewClusterSize, EXTRA*numClusters * sizeof(int), cudaMemcpyDeviceToHost )); #ifdef OUTPUT_RESULT printf("Membership:\n"); #endif for (i=0; i<numClusters; i++) { #ifdef OUTPUT_RESULT printf("%d ",newClusterSize[i]); #endif for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; } } #ifdef OUTPUT_RESULT printf("\nClusters:\n"); for (i=0;i<numClusters;i++){ for (j=0;j<numCoords;j++) printf("%f ",dimClusters[j][i]); printf("\n"); } #endif checkCuda(cudaMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), cudaMemcpyHostToDevice)); checkCuda(cudaMemset(deviceNewCluster, 0, numClusters*numCoords*sizeof(float))); checkCuda(cudaMemset(deviceNewClusterSize, 0, EXTRA*numClusters * sizeof(int))); #ifdef OUTPUT_TIME gettimeofday(&tval_after, NULL); timersub(&tval_after, &tval_before, &tval_result); printf(", %ld.%06ld\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); #endif delta /= numObjs; } while (delta > threshold && loop++ < 500); //====================================================== // calculate again to handle float-point errors checkCuda(cudaMemcpy(membership, deviceMembership, numObjs*sizeof(int), cudaMemcpyDeviceToHost)); for (i=0; i<numObjs; i++) { index = membership[i]; newClusterSize[index]++; for (j=0; j<numCoords; j++){ newClusters[j][index] += objects[i][j]; } } #ifdef OUTPUT_RESULT printf("Membership:\n"); #endif for (i=0; i<numClusters; i++) { 
#ifdef OUTPUT_RESULT printf("%d ",newClusterSize[i]); #endif for (j=0; j<numCoords; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; newClusters[j][i] = 0.0; } newClusterSize[i] = 0; } #ifdef OUTPUT_RESULT printf("\nClusters:\n"); for (i=0;i<numClusters;i++){ for (j=0;j<numCoords;j++) printf("%f ",dimClusters[j][i]); printf("\n"); } #endif checkCuda(cudaMemcpy(deviceClusters, dimClusters[0], numClusters*numCoords*sizeof(float), cudaMemcpyHostToDevice)); //===================================================== *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, numCoords, float); //GPU -> mem checkCuda(cudaMemcpy(dimClusters[0], deviceClusters, numClusters*numCoords*sizeof(float), cudaMemcpyDeviceToHost)); for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { clusters[i][j] = dimClusters[j][i]; //printf("%f ",clusters[i][j]); } // printf("\n"); } checkCuda(cudaFree(deviceObjects)); checkCuda(cudaFree(deviceClusters)); checkCuda(cudaFree(deviceMembership)); checkCuda(cudaFree(deviceIntermediates)); checkCuda(cudaFree(deviceNewCluster)); checkCuda(cudaFree(deviceNewClusterSize)); free(dimObjects[0]); free(dimObjects); free(dimClusters[0]); free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
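A hypothetical driver for cuda_kmeans is sketched below, derived only from the signature above. It fills a [numObjs][numCoords] array with random values rather than using the project's own I/O and the malloc2D helper from kmeans.h (not shown here), so treat it purely as a usage sketch.

// Hypothetical usage of cuda_kmeans (requires <stdlib.h>; illustrative only).
float** run_kmeans_example(int numObjs, int numCoords, int numClusters) {
    float** objects = (float**)malloc(numObjs * sizeof(float*));
    for (int i = 0; i < numObjs; i++) {
        objects[i] = (float*)malloc(numCoords * sizeof(float));
        for (int j = 0; j < numCoords; j++)
            objects[i][j] = (float)rand() / RAND_MAX;
    }

    int* membership = (int*)malloc(numObjs * sizeof(int));
    int loops = 0;
    float threshold = 0.001f;  // fraction of objects allowed to change cluster

    float** clusters = cuda_kmeans(objects, numCoords, numObjs, numClusters,
                                   threshold, membership, &loops);

    // ... consume clusters[k][d] and membership[i], then free all allocations ...
    return clusters;
}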
41f31c7d3af958c4e67c7a9e985de09d9d516147.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // given code from https://github.com/Wigner-GPU-Lab/Teaching/tree/gpgpu2-2020-1/GPGPU1/CUDA/matmul // used headers and libraries #include <vector> #include <numeric> #include <algorithm> #include <random> #include <chrono> #include <iostream> #include "cpu_matmul.h" // matrix block size static const int MBS = 32; // kernel __global__ void matmul_improved(float *C, float *A, float *B, int N) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float Atmp[MBS * MBS]; __shared__ float Btmp[MBS * MBS]; float sum = 0; for (int K = 0; K < N / MBS; ++K) { Atmp[threadIdx.y * MBS + threadIdx.x] = A[y * N + (K * MBS + threadIdx.x)]; Btmp[threadIdx.y * MBS + threadIdx.x] = B[(K * MBS + threadIdx.y) * N + x]; __syncthreads(); for (int k = 0; k < MBS; ++k) { sum += Atmp[threadIdx.y * MBS + k] * Btmp[k * MBS + threadIdx.x]; } __syncthreads(); } C[y * N + x] = sum; } // main function int main() { const int N = 1024; const int block_sz = MBS; const int n_blocks = N / MBS; std::vector<float> A(N * N); std::vector<float> B(N * N); std::vector<float> C0(N * N); std::vector<float> C1(N * N); std::vector<float> C2(N * N); // generates random integers std::mt19937 mersenne_engine{42}; std::uniform_real_distribution<float> dist{-0.1f, 0.1f}; auto gen = [&dist, &mersenne_engine]() { return dist(mersenne_engine); }; generate(A.begin(), A.end(), gen); generate(B.begin(), B.end(), gen); std::fill(C0.begin(), C0.end(), 0.0f); std::fill(C1.begin(), C1.end(), 0.0f); std::fill(C2.begin(), C2.end(), 0.0f); float *pA = nullptr; float *pB = nullptr; float *pC2 = nullptr; hipEvent_t evt[2]; for (auto &e : evt) { hipEventCreate(&e); } hipError_t err = hipSuccess; err = hipMalloc((void **)&pA, N * N * sizeof(float)); if (err != hipSuccess) { std::cout << "Error allocating CUDA memory: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMalloc((void **)&pB, N * N * sizeof(float)); if (err != hipSuccess) { std::cout << "Error allocating CUDA memory: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMalloc((void **)&pC2, N * N * sizeof(float)); if (err != hipSuccess) { std::cout << "Error allocating CUDA memory: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMemcpy(pA, A.data(), N * N * sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { std::cout << "Error copying memory to device: " << hipGetErrorString(err) << "\n"; return -1; } err = hipMemcpy(pB, B.data(), N * N * sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { std::cout << "Error copying memory to device: " << hipGetErrorString(err) << "\n"; return -1; } { dim3 dimGrid(n_blocks, n_blocks); dim3 dimBlock(block_sz, block_sz); hipEventRecord(evt[0]); hipLaunchKernelGGL(( matmul_improved), dim3(dimGrid), dim3(dimBlock), 0, 0, pC2, pA, pB, N); err = hipGetLastError(); if (err != hipSuccess) { std::cout << "CUDA error in kernel call: " << hipGetErrorString(err) << "\n"; return -1; } hipEventRecord(evt[1]); } err = hipMemcpy(C2.data(), pC2, N * N * sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { std::cout << "Error copying memory to host: " << hipGetErrorString(err) << "\n"; return -1; } err = hipFree(pA); if (err != hipSuccess) { std::cout << "Error freeing allocation: " << hipGetErrorString(err) << "\n"; return -1; } err = hipFree(pB); if (err != hipSuccess) { std::cout << "Error freeing allocation: " << hipGetErrorString(err) << "\n"; return -1; } 
err = hipFree(pC2); if (err != hipSuccess) { std::cout << "Error freeing allocation: " << hipGetErrorString(err) << "\n"; return -1; } hipEventSynchronize(evt[1]); // milliseconds float dt = 0.0f; hipEventElapsedTime(&dt, evt[0], evt[1]); for (auto &e : evt) { hipEventDestroy(e); } auto t0 = std::chrono::high_resolution_clock::now(); cpu_matmul_naive(C0, A, B, N); auto t1 = std::chrono::high_resolution_clock::now(); cpu_matmul_improved(C1, A, B, N, MBS); auto t2 = std::chrono::high_resolution_clock::now(); const float max_err = 1e-5f; auto comparator = [max_err](float l, float r) { return std::abs(l - r) < max_err; }; for (int i = 0; i < N * N; ++i) { if (!comparator(C0[i], C1[i])) { std::cout << "C0 vs C1 [" << i << "] : " << C0[i] << " " << C1[i] << " absolute error: " << std::abs(C0[i] - C1[i]) << "\n"; } } for (int i = 0; i < N * N; ++i) { if (!comparator(C0[i], C2[i])) { std::cout << "C0 vs C2 [" << i << "] : " << C0[i] << " " << C2[i] << " absolute error: " << std::abs(C0[i] - C2[i]) << "\n"; } } if (std::equal(C0.begin(), C0.end(), C1.begin(), comparator)) { std::cout << "CPU improved matches CPU naive.\n"; } else { std::cout << "Mismatch in the two CPU results.\n"; } if (std::equal(C0.begin(), C0.end(), C2.begin(), comparator)) { std::cout << "GPU improved matches CPU naive.\n"; } else { std::cout << "Mismatch in CPU and GPU results.\n"; } std::cout << "CPU naive Computation took: " << std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count() / 1000.0f << " ms\n"; std::cout << "CPU improved Computation took: " << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f << " ms\n"; std::cout << "GPU improved Computation took: " << dt << " ms.\n"; return 0; }
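Since dt above is reported in milliseconds, it can be turned into an effective throughput figure; the helper below is a small illustrative addition, not part of the original benchmark.

// Convert an N x N matmul kernel time (ms) into GFLOP/s; a dense matmul
// performs roughly 2*n^3 floating-point operations (illustrative only).
inline double matmul_gflops(int n, float elapsed_ms) {
    return 2.0 * n * n * n / (elapsed_ms * 1.0e6);
}
// e.g. std::cout << matmul_gflops(N, dt) << " GFLOP/s\n"; after the timing above.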
41f31c7d3af958c4e67c7a9e985de09d9d516147.cu
// given code from https://github.com/Wigner-GPU-Lab/Teaching/tree/gpgpu2-2020-1/GPGPU1/CUDA/matmul // used headers and libraries #include <vector> #include <numeric> #include <algorithm> #include <random> #include <chrono> #include <iostream> #include "cpu_matmul.h" // matrix block size static const int MBS = 32; // kernel __global__ void matmul_improved(float *C, float *A, float *B, int N) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float Atmp[MBS * MBS]; __shared__ float Btmp[MBS * MBS]; float sum = 0; for (int K = 0; K < N / MBS; ++K) { Atmp[threadIdx.y * MBS + threadIdx.x] = A[y * N + (K * MBS + threadIdx.x)]; Btmp[threadIdx.y * MBS + threadIdx.x] = B[(K * MBS + threadIdx.y) * N + x]; __syncthreads(); for (int k = 0; k < MBS; ++k) { sum += Atmp[threadIdx.y * MBS + k] * Btmp[k * MBS + threadIdx.x]; } __syncthreads(); } C[y * N + x] = sum; } // main function int main() { const int N = 1024; const int block_sz = MBS; const int n_blocks = N / MBS; std::vector<float> A(N * N); std::vector<float> B(N * N); std::vector<float> C0(N * N); std::vector<float> C1(N * N); std::vector<float> C2(N * N); // generates random integers std::mt19937 mersenne_engine{42}; std::uniform_real_distribution<float> dist{-0.1f, 0.1f}; auto gen = [&dist, &mersenne_engine]() { return dist(mersenne_engine); }; generate(A.begin(), A.end(), gen); generate(B.begin(), B.end(), gen); std::fill(C0.begin(), C0.end(), 0.0f); std::fill(C1.begin(), C1.end(), 0.0f); std::fill(C2.begin(), C2.end(), 0.0f); float *pA = nullptr; float *pB = nullptr; float *pC2 = nullptr; cudaEvent_t evt[2]; for (auto &e : evt) { cudaEventCreate(&e); } cudaError_t err = cudaSuccess; err = cudaMalloc((void **)&pA, N * N * sizeof(float)); if (err != cudaSuccess) { std::cout << "Error allocating CUDA memory: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMalloc((void **)&pB, N * N * sizeof(float)); if (err != cudaSuccess) { std::cout << "Error allocating CUDA memory: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMalloc((void **)&pC2, N * N * sizeof(float)); if (err != cudaSuccess) { std::cout << "Error allocating CUDA memory: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMemcpy(pA, A.data(), N * N * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cout << "Error copying memory to device: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaMemcpy(pB, B.data(), N * N * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { std::cout << "Error copying memory to device: " << cudaGetErrorString(err) << "\n"; return -1; } { dim3 dimGrid(n_blocks, n_blocks); dim3 dimBlock(block_sz, block_sz); cudaEventRecord(evt[0]); matmul_improved<<<dimGrid, dimBlock>>>(pC2, pA, pB, N); err = cudaGetLastError(); if (err != cudaSuccess) { std::cout << "CUDA error in kernel call: " << cudaGetErrorString(err) << "\n"; return -1; } cudaEventRecord(evt[1]); } err = cudaMemcpy(C2.data(), pC2, N * N * sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { std::cout << "Error copying memory to host: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaFree(pA); if (err != cudaSuccess) { std::cout << "Error freeing allocation: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaFree(pB); if (err != cudaSuccess) { std::cout << "Error freeing allocation: " << cudaGetErrorString(err) << "\n"; return -1; } err = cudaFree(pC2); if (err != cudaSuccess) { std::cout << "Error freeing allocation: " 
<< cudaGetErrorString(err) << "\n"; return -1; } cudaEventSynchronize(evt[1]); // milliseconds float dt = 0.0f; cudaEventElapsedTime(&dt, evt[0], evt[1]); for (auto &e : evt) { cudaEventDestroy(e); } auto t0 = std::chrono::high_resolution_clock::now(); cpu_matmul_naive(C0, A, B, N); auto t1 = std::chrono::high_resolution_clock::now(); cpu_matmul_improved(C1, A, B, N, MBS); auto t2 = std::chrono::high_resolution_clock::now(); const float max_err = 1e-5f; auto comparator = [max_err](float l, float r) { return std::abs(l - r) < max_err; }; for (int i = 0; i < N * N; ++i) { if (!comparator(C0[i], C1[i])) { std::cout << "C0 vs C1 [" << i << "] : " << C0[i] << " " << C1[i] << " absolute error: " << std::abs(C0[i] - C1[i]) << "\n"; } } for (int i = 0; i < N * N; ++i) { if (!comparator(C0[i], C2[i])) { std::cout << "C0 vs C2 [" << i << "] : " << C0[i] << " " << C2[i] << " absolute error: " << std::abs(C0[i] - C2[i]) << "\n"; } } if (std::equal(C0.begin(), C0.end(), C1.begin(), comparator)) { std::cout << "CPU improved matches CPU naive.\n"; } else { std::cout << "Mismatch in the two CPU results.\n"; } if (std::equal(C0.begin(), C0.end(), C2.begin(), comparator)) { std::cout << "GPU improved matches CPU naive.\n"; } else { std::cout << "Mismatch in CPU and GPU results.\n"; } std::cout << "CPU naive Computation took: " << std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count() / 1000.0f << " ms\n"; std::cout << "CPU improved Computation took: " << std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f << " ms\n"; std::cout << "GPU improved Computation took: " << dt << " ms.\n"; return 0; }
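cpu_matmul_naive and cpu_matmul_improved come from cpu_matmul.h, which is not included in this listing. The sketch below shows plausible reference implementations inferred only from the call sites above (row-major N x N std::vector<float> operands, a block size for the improved variant, and a caller that zero-fills C); signatures and behaviour are assumptions.

// Hypothetical reference implementations matching the call sites above.
#include <vector>

void cpu_matmul_naive(std::vector<float>& C, const std::vector<float>& A,
                      const std::vector<float>& B, int N) {
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j) {
            float sum = 0.0f;
            for (int k = 0; k < N; ++k)
                sum += A[i * N + k] * B[k * N + j];
            C[i * N + j] = sum;
        }
}

void cpu_matmul_improved(std::vector<float>& C, const std::vector<float>& A,
                         const std::vector<float>& B, int N, int BS) {
    // Simple cache blocking; assumes N % BS == 0 and that C starts zero-filled.
    for (int ii = 0; ii < N; ii += BS)
        for (int kk = 0; kk < N; kk += BS)
            for (int jj = 0; jj < N; jj += BS)
                for (int i = ii; i < ii + BS; ++i)
                    for (int k = kk; k < kk + BS; ++k) {
                        const float a = A[i * N + k];
                        for (int j = jj; j < jj + BS; ++j)
                            C[i * N + j] += a * B[k * N + j];
                    }
}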
90c008e58faef254f4316e1b1c787821ebab5649.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "spatial_data.h" #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/distance/distance_types.hpp> #include <raft/neighbors/ball_cover.cuh> #include <raft/neighbors/brute_force.cuh> #include <raft/random/make_blobs.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/count.h> #include <thrust/fill.h> #include <thrust/transform.h> #include <cstdint> #include <gtest/gtest.h> #include <iostream> #include <vector> namespace raft::neighbors::ball_cover { using namespace std; template <typename value_idx, typename value_t> __global__ void count_discrepancies_kernel(value_idx* actual_idx, value_idx* expected_idx, value_t* actual, value_t* expected, uint32_t m, uint32_t n, uint32_t* out, float thres = 1e-3) { uint32_t row = blockDim.x * blockIdx.x + threadIdx.x; int n_diffs = 0; if (row < m) { for (uint32_t i = 0; i < n; i++) { value_t d = actual[row * n + i] - expected[row * n + i]; bool matches = (fabsf(d) <= thres) || (actual_idx[row * n + i] == expected_idx[row * n + i] && actual_idx[row * n + i] == row); if (!matches) { printf( "row=%ud, n=%ud, actual_dist=%f, actual_ind=%ld, expected_dist=%f, expected_ind=%ld\n", row, i, actual[row * n + i], actual_idx[row * n + i], expected[row * n + i], expected_idx[row * n + i]); } n_diffs += !matches; out[row] = n_diffs; } } } struct is_nonzero { __host__ __device__ bool operator()(uint32_t& i) { return i > 0; } }; template <typename value_idx, typename value_t> uint32_t count_discrepancies(value_idx* actual_idx, value_idx* expected_idx, value_t* actual, value_t* expected, uint32_t m, uint32_t n, uint32_t* out, hipStream_t stream) { uint32_t tpb = 256; hipLaunchKernelGGL(( count_discrepancies_kernel), dim3(raft::ceildiv(m, tpb)), dim3(tpb), 0, stream, actual_idx, expected_idx, actual, expected, m, n, out); auto exec_policy = rmm::exec_policy(stream); uint32_t result = thrust::count_if(exec_policy, out, out + m, is_nonzero()); return result; } template <typename value_t> void compute_bfknn(const raft::resources& handle, const value_t* X1, const value_t* X2, uint32_t n_rows, uint32_t n_query_rows, uint32_t d, uint32_t k, const raft::distance::DistanceType metric, value_t* dists, int64_t* inds) { std::vector<raft::device_matrix_view<const value_t, uint32_t>> input_vec = { make_device_matrix_view(X1, n_rows, d)}; raft::neighbors::brute_force::knn(handle, input_vec, make_device_matrix_view(X2, n_query_rows, d), make_device_matrix_view(inds, n_query_rows, k), make_device_matrix_view(dists, n_query_rows, k), metric); } struct ToRadians { __device__ __host__ float operator()(float a) { return a * (CUDART_PI_F / 180.0); } }; template <typename value_int = std::uint32_t> struct BallCoverInputs 
{ value_int k; value_int n_rows; value_int n_cols; float weight; value_int n_query; raft::distance::DistanceType metric; }; template <typename value_idx, typename value_t, typename value_int = std::uint32_t> class BallCoverKNNQueryTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> { protected: void basicTest() { params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam(); raft::resources handle; uint32_t k = params.k; uint32_t n_centers = 25; float weight = params.weight; auto metric = params.metric; rmm::device_uvector<value_t> X(params.n_rows * params.n_cols, resource::get_cuda_stream(handle)); rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle)); // Make sure the train and query sets are completely disjoint rmm::device_uvector<value_t> X2(params.n_query * params.n_cols, resource::get_cuda_stream(handle)); rmm::device_uvector<uint32_t> Y2(params.n_query, resource::get_cuda_stream(handle)); raft::random::make_blobs(X.data(), Y.data(), params.n_rows, params.n_cols, n_centers, resource::get_cuda_stream(handle)); raft::random::make_blobs(X2.data(), Y2.data(), params.n_query, params.n_cols, n_centers, resource::get_cuda_stream(handle)); rmm::device_uvector<value_idx> d_ref_I(params.n_query * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_ref_D(params.n_query * k, resource::get_cuda_stream(handle)); if (metric == raft::distance::DistanceType::Haversine) { thrust::transform( resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians()); thrust::transform(resource::get_thrust_policy(handle), X2.data(), X2.data() + X2.size(), X2.data(), ToRadians()); } compute_bfknn(handle, X.data(), X2.data(), params.n_rows, params.n_query, params.n_cols, k, metric, d_ref_D.data(), d_ref_I.data()); resource::sync_stream(handle); // Allocate predicted arrays rmm::device_uvector<value_idx> d_pred_I(params.n_query * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_pred_D(params.n_query * k, resource::get_cuda_stream(handle)); auto X_view = raft::make_device_matrix_view<value_t, value_int>(X.data(), params.n_rows, params.n_cols); auto X2_view = raft::make_device_matrix_view<const value_t, value_int>( (const value_t*)X2.data(), params.n_query, params.n_cols); auto d_pred_I_view = raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_query, k); auto d_pred_D_view = raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_query, k); BallCoverIndex<value_idx, value_t, value_int, value_int> index(handle, X_view, metric); build_index(handle, index); knn_query(handle, index, X2_view, d_pred_I_view, d_pred_D_view, k, true); resource::sync_stream(handle); // What we really want are for the distances to match exactly. The // indices may or may not match exactly, depending upon the ordering which // can be nondeterministic. 
rmm::device_uvector<uint32_t> discrepancies(params.n_query, resource::get_cuda_stream(handle)); thrust::fill(resource::get_thrust_policy(handle), discrepancies.data(), discrepancies.data() + discrepancies.size(), 0); // int res = count_discrepancies(d_ref_I.data(), d_pred_I.data(), d_ref_D.data(), d_pred_D.data(), params.n_query, k, discrepancies.data(), resource::get_cuda_stream(handle)); ASSERT_TRUE(res == 0); } void SetUp() override {} void TearDown() override {} protected: uint32_t d = 2; BallCoverInputs<value_int> params; }; template <typename value_idx, typename value_t, typename value_int = std::uint32_t> class BallCoverAllKNNTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> { protected: void basicTest() { params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam(); raft::resources handle; uint32_t k = params.k; uint32_t n_centers = 25; float weight = params.weight; auto metric = params.metric; rmm::device_uvector<value_t> X(params.n_rows * params.n_cols, resource::get_cuda_stream(handle)); rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle)); raft::random::make_blobs(X.data(), Y.data(), params.n_rows, params.n_cols, n_centers, resource::get_cuda_stream(handle)); rmm::device_uvector<value_idx> d_ref_I(params.n_rows * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_ref_D(params.n_rows * k, resource::get_cuda_stream(handle)); auto X_view = raft::make_device_matrix_view<const value_t, value_int>( (const value_t*)X.data(), params.n_rows, params.n_cols); if (metric == raft::distance::DistanceType::Haversine) { thrust::transform( resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians()); } compute_bfknn(handle, X.data(), X.data(), params.n_rows, params.n_rows, params.n_cols, k, metric, d_ref_D.data(), d_ref_I.data()); resource::sync_stream(handle); // Allocate predicted arrays rmm::device_uvector<value_idx> d_pred_I(params.n_rows * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_pred_D(params.n_rows * k, resource::get_cuda_stream(handle)); auto d_pred_I_view = raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_rows, k); auto d_pred_D_view = raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_rows, k); BallCoverIndex<value_idx, value_t> index(handle, X_view, metric); all_knn_query(handle, index, d_pred_I_view, d_pred_D_view, k, true); resource::sync_stream(handle); // What we really want are for the distances to match exactly. The // indices may or may not match exactly, depending upon the ordering which // can be nondeterministic. rmm::device_uvector<uint32_t> discrepancies(params.n_rows, resource::get_cuda_stream(handle)); thrust::fill(resource::get_thrust_policy(handle), discrepancies.data(), discrepancies.data() + discrepancies.size(), 0); // uint32_t res = count_discrepancies(d_ref_I.data(), d_pred_I.data(), d_ref_D.data(), d_pred_D.data(), params.n_rows, k, discrepancies.data(), resource::get_cuda_stream(handle)); // TODO: There seem to be discrepancies here only when // the entire test suite is executed. 
// Ref: https://github.com/rapidsai/raft/issues/ // 1-5 mismatches in 8000 samples is 0.0125% - 0.0625% ASSERT_TRUE(res <= 5); } void SetUp() override {} void TearDown() override {} protected: BallCoverInputs<value_int> params; }; typedef BallCoverAllKNNTest<int64_t, float> BallCoverAllKNNTestF; typedef BallCoverKNNQueryTest<int64_t, float> BallCoverKNNQueryTestF; const std::vector<BallCoverInputs<std::uint32_t>> ballcover_inputs = { {11, 5000, 2, 1.0, 10000, raft::distance::DistanceType::Haversine}, {25, 10000, 2, 1.0, 5000, raft::distance::DistanceType::Haversine}, {2, 10000, 2, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}, {2, 5000, 2, 1.0, 10000, raft::distance::DistanceType::Haversine}, {11, 10000, 2, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}, {25, 5000, 2, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded}, {5, 8000, 3, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded}, {11, 6000, 3, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded}, {25, 10000, 3, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}}; INSTANTIATE_TEST_CASE_P(BallCoverAllKNNTest, BallCoverAllKNNTestF, ::testing::ValuesIn(ballcover_inputs)); INSTANTIATE_TEST_CASE_P(BallCoverKNNQueryTest, BallCoverKNNQueryTestF, ::testing::ValuesIn(ballcover_inputs)); TEST_P(BallCoverAllKNNTestF, Fit) { basicTest(); } TEST_P(BallCoverKNNQueryTestF, Fit) { basicTest(); } } // namespace raft::neighbors::ball_cover
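The test file above registers parameterized gtest cases but does not define main; RAFT's own test harness normally links an entry point. When built standalone, a conventional gtest main such as the sketch below would be needed (illustrative only).

// Hypothetical standalone gtest entry point (illustrative only).
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}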
90c008e58faef254f4316e1b1c787821ebab5649.cu
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include "spatial_data.h" #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/distance/distance_types.hpp> #include <raft/neighbors/ball_cover.cuh> #include <raft/neighbors/brute_force.cuh> #include <raft/random/make_blobs.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/count.h> #include <thrust/fill.h> #include <thrust/transform.h> #include <cstdint> #include <gtest/gtest.h> #include <iostream> #include <vector> namespace raft::neighbors::ball_cover { using namespace std; template <typename value_idx, typename value_t> __global__ void count_discrepancies_kernel(value_idx* actual_idx, value_idx* expected_idx, value_t* actual, value_t* expected, uint32_t m, uint32_t n, uint32_t* out, float thres = 1e-3) { uint32_t row = blockDim.x * blockIdx.x + threadIdx.x; int n_diffs = 0; if (row < m) { for (uint32_t i = 0; i < n; i++) { value_t d = actual[row * n + i] - expected[row * n + i]; bool matches = (fabsf(d) <= thres) || (actual_idx[row * n + i] == expected_idx[row * n + i] && actual_idx[row * n + i] == row); if (!matches) { printf( "row=%ud, n=%ud, actual_dist=%f, actual_ind=%ld, expected_dist=%f, expected_ind=%ld\n", row, i, actual[row * n + i], actual_idx[row * n + i], expected[row * n + i], expected_idx[row * n + i]); } n_diffs += !matches; out[row] = n_diffs; } } } struct is_nonzero { __host__ __device__ bool operator()(uint32_t& i) { return i > 0; } }; template <typename value_idx, typename value_t> uint32_t count_discrepancies(value_idx* actual_idx, value_idx* expected_idx, value_t* actual, value_t* expected, uint32_t m, uint32_t n, uint32_t* out, cudaStream_t stream) { uint32_t tpb = 256; count_discrepancies_kernel<<<raft::ceildiv(m, tpb), tpb, 0, stream>>>( actual_idx, expected_idx, actual, expected, m, n, out); auto exec_policy = rmm::exec_policy(stream); uint32_t result = thrust::count_if(exec_policy, out, out + m, is_nonzero()); return result; } template <typename value_t> void compute_bfknn(const raft::resources& handle, const value_t* X1, const value_t* X2, uint32_t n_rows, uint32_t n_query_rows, uint32_t d, uint32_t k, const raft::distance::DistanceType metric, value_t* dists, int64_t* inds) { std::vector<raft::device_matrix_view<const value_t, uint32_t>> input_vec = { make_device_matrix_view(X1, n_rows, d)}; raft::neighbors::brute_force::knn(handle, input_vec, make_device_matrix_view(X2, n_query_rows, d), make_device_matrix_view(inds, n_query_rows, k), make_device_matrix_view(dists, n_query_rows, k), metric); } struct ToRadians { __device__ __host__ float operator()(float a) { return a * (CUDART_PI_F / 180.0); } }; template <typename value_int = std::uint32_t> struct BallCoverInputs { value_int k; value_int n_rows; value_int n_cols; float weight; value_int n_query; raft::distance::DistanceType 
metric; }; template <typename value_idx, typename value_t, typename value_int = std::uint32_t> class BallCoverKNNQueryTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> { protected: void basicTest() { params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam(); raft::resources handle; uint32_t k = params.k; uint32_t n_centers = 25; float weight = params.weight; auto metric = params.metric; rmm::device_uvector<value_t> X(params.n_rows * params.n_cols, resource::get_cuda_stream(handle)); rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle)); // Make sure the train and query sets are completely disjoint rmm::device_uvector<value_t> X2(params.n_query * params.n_cols, resource::get_cuda_stream(handle)); rmm::device_uvector<uint32_t> Y2(params.n_query, resource::get_cuda_stream(handle)); raft::random::make_blobs(X.data(), Y.data(), params.n_rows, params.n_cols, n_centers, resource::get_cuda_stream(handle)); raft::random::make_blobs(X2.data(), Y2.data(), params.n_query, params.n_cols, n_centers, resource::get_cuda_stream(handle)); rmm::device_uvector<value_idx> d_ref_I(params.n_query * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_ref_D(params.n_query * k, resource::get_cuda_stream(handle)); if (metric == raft::distance::DistanceType::Haversine) { thrust::transform( resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians()); thrust::transform(resource::get_thrust_policy(handle), X2.data(), X2.data() + X2.size(), X2.data(), ToRadians()); } compute_bfknn(handle, X.data(), X2.data(), params.n_rows, params.n_query, params.n_cols, k, metric, d_ref_D.data(), d_ref_I.data()); resource::sync_stream(handle); // Allocate predicted arrays rmm::device_uvector<value_idx> d_pred_I(params.n_query * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_pred_D(params.n_query * k, resource::get_cuda_stream(handle)); auto X_view = raft::make_device_matrix_view<value_t, value_int>(X.data(), params.n_rows, params.n_cols); auto X2_view = raft::make_device_matrix_view<const value_t, value_int>( (const value_t*)X2.data(), params.n_query, params.n_cols); auto d_pred_I_view = raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_query, k); auto d_pred_D_view = raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_query, k); BallCoverIndex<value_idx, value_t, value_int, value_int> index(handle, X_view, metric); build_index(handle, index); knn_query(handle, index, X2_view, d_pred_I_view, d_pred_D_view, k, true); resource::sync_stream(handle); // What we really want are for the distances to match exactly. The // indices may or may not match exactly, depending upon the ordering which // can be nondeterministic. 
rmm::device_uvector<uint32_t> discrepancies(params.n_query, resource::get_cuda_stream(handle)); thrust::fill(resource::get_thrust_policy(handle), discrepancies.data(), discrepancies.data() + discrepancies.size(), 0); // int res = count_discrepancies(d_ref_I.data(), d_pred_I.data(), d_ref_D.data(), d_pred_D.data(), params.n_query, k, discrepancies.data(), resource::get_cuda_stream(handle)); ASSERT_TRUE(res == 0); } void SetUp() override {} void TearDown() override {} protected: uint32_t d = 2; BallCoverInputs<value_int> params; }; template <typename value_idx, typename value_t, typename value_int = std::uint32_t> class BallCoverAllKNNTest : public ::testing::TestWithParam<BallCoverInputs<value_int>> { protected: void basicTest() { params = ::testing::TestWithParam<BallCoverInputs<value_int>>::GetParam(); raft::resources handle; uint32_t k = params.k; uint32_t n_centers = 25; float weight = params.weight; auto metric = params.metric; rmm::device_uvector<value_t> X(params.n_rows * params.n_cols, resource::get_cuda_stream(handle)); rmm::device_uvector<uint32_t> Y(params.n_rows, resource::get_cuda_stream(handle)); raft::random::make_blobs(X.data(), Y.data(), params.n_rows, params.n_cols, n_centers, resource::get_cuda_stream(handle)); rmm::device_uvector<value_idx> d_ref_I(params.n_rows * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_ref_D(params.n_rows * k, resource::get_cuda_stream(handle)); auto X_view = raft::make_device_matrix_view<const value_t, value_int>( (const value_t*)X.data(), params.n_rows, params.n_cols); if (metric == raft::distance::DistanceType::Haversine) { thrust::transform( resource::get_thrust_policy(handle), X.data(), X.data() + X.size(), X.data(), ToRadians()); } compute_bfknn(handle, X.data(), X.data(), params.n_rows, params.n_rows, params.n_cols, k, metric, d_ref_D.data(), d_ref_I.data()); resource::sync_stream(handle); // Allocate predicted arrays rmm::device_uvector<value_idx> d_pred_I(params.n_rows * k, resource::get_cuda_stream(handle)); rmm::device_uvector<value_t> d_pred_D(params.n_rows * k, resource::get_cuda_stream(handle)); auto d_pred_I_view = raft::make_device_matrix_view<value_idx, value_int>(d_pred_I.data(), params.n_rows, k); auto d_pred_D_view = raft::make_device_matrix_view<value_t, value_int>(d_pred_D.data(), params.n_rows, k); BallCoverIndex<value_idx, value_t> index(handle, X_view, metric); all_knn_query(handle, index, d_pred_I_view, d_pred_D_view, k, true); resource::sync_stream(handle); // What we really want are for the distances to match exactly. The // indices may or may not match exactly, depending upon the ordering which // can be nondeterministic. rmm::device_uvector<uint32_t> discrepancies(params.n_rows, resource::get_cuda_stream(handle)); thrust::fill(resource::get_thrust_policy(handle), discrepancies.data(), discrepancies.data() + discrepancies.size(), 0); // uint32_t res = count_discrepancies(d_ref_I.data(), d_pred_I.data(), d_ref_D.data(), d_pred_D.data(), params.n_rows, k, discrepancies.data(), resource::get_cuda_stream(handle)); // TODO: There seem to be discrepancies here only when // the entire test suite is executed. 
// Ref: https://github.com/rapidsai/raft/issues/ // 1-5 mismatches in 8000 samples is 0.0125% - 0.0625% ASSERT_TRUE(res <= 5); } void SetUp() override {} void TearDown() override {} protected: BallCoverInputs<value_int> params; }; typedef BallCoverAllKNNTest<int64_t, float> BallCoverAllKNNTestF; typedef BallCoverKNNQueryTest<int64_t, float> BallCoverKNNQueryTestF; const std::vector<BallCoverInputs<std::uint32_t>> ballcover_inputs = { {11, 5000, 2, 1.0, 10000, raft::distance::DistanceType::Haversine}, {25, 10000, 2, 1.0, 5000, raft::distance::DistanceType::Haversine}, {2, 10000, 2, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}, {2, 5000, 2, 1.0, 10000, raft::distance::DistanceType::Haversine}, {11, 10000, 2, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}, {25, 5000, 2, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded}, {5, 8000, 3, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded}, {11, 6000, 3, 1.0, 10000, raft::distance::DistanceType::L2SqrtUnexpanded}, {25, 10000, 3, 1.0, 5000, raft::distance::DistanceType::L2SqrtUnexpanded}}; INSTANTIATE_TEST_CASE_P(BallCoverAllKNNTest, BallCoverAllKNNTestF, ::testing::ValuesIn(ballcover_inputs)); INSTANTIATE_TEST_CASE_P(BallCoverKNNQueryTest, BallCoverKNNQueryTestF, ::testing::ValuesIn(ballcover_inputs)); TEST_P(BallCoverAllKNNTestF, Fit) { basicTest(); } TEST_P(BallCoverKNNQueryTestF, Fit) { basicTest(); } } // namespace raft::neighbors::ball_cover
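The assertion logic in both tests boils down to a per-row comparison of predicted against brute-force distances with a small tolerance, followed by a reduction over rows. A stripped-down, distance-only sketch of that check (kernel and helper names here are illustrative, not the test's actual count_discrepancies; error checks omitted):

#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <cmath>
#include <cstdint>

// One thread per query row: count how many of its k distances deviate from the
// brute-force reference by more than thres.
__global__ void mark_row_mismatches(const float* ref_D, const float* pred_D,
                                    uint32_t m, uint32_t k, float thres, uint32_t* out)
{
  uint32_t row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= m) return;
  uint32_t diffs = 0;
  for (uint32_t i = 0; i < k; ++i)
    diffs += fabsf(ref_D[row * k + i] - pred_D[row * k + i]) > thres;
  out[row] = diffs;
}

struct is_nonzero_u32 {
  __host__ __device__ bool operator()(uint32_t v) const { return v > 0; }
};

// Returns the number of rows with at least one mismatching distance.
uint32_t count_mismatching_rows(const float* ref_D, const float* pred_D,
                                uint32_t m, uint32_t k, uint32_t* out)
{
  const uint32_t tpb = 256;
  mark_row_mismatches<<<(m + tpb - 1) / tpb, tpb>>>(ref_D, pred_D, m, k, 1e-3f, out);
  return thrust::count_if(thrust::device, out, out + m, is_nonzero_u32());
}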
e220f2bbac8415d9c45719603cfadb001a7b3ea3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void d_addToCurrentTransform(float* d_currentTransform, float* d_invViewMatrix)
{
    float result[12] = {0.f};
    for (int i = 0; i < 3; ++i) {
        for (int j = 0; j < 4; ++j) {
            for (int k = 0; k < 4; ++k) {
                result[i * 4 + j] += d_invViewMatrix[i * 4 + k] * d_currentTransform[k * 4 + j];
            }
        }
    }
    for (int i = 0; i < 12; ++i) {
        // The last row of currentTransform remains (0,0,0,1)
        d_currentTransform[i] = result[i];
    }
}
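For this small kernel, hipify only rewrites the includes; what changes on the host side is the launch syntax. A minimal HIP host sketch for driving it (the wrapper itself is not part of the original file; buffer sizes follow the kernel's indexing: currentTransform is a row-major 4x4 whose last row stays (0,0,0,1), invViewMatrix supplies the top 3x4 block; error checks omitted):

#include <hip/hip_runtime.h>

void addToCurrentTransform(float h_currentTransform[16], const float h_invViewMatrix[12])
{
    float *d_cur = nullptr, *d_inv = nullptr;
    hipMalloc(&d_cur, 16 * sizeof(float));
    hipMalloc(&d_inv, 12 * sizeof(float));
    hipMemcpy(d_cur, h_currentTransform, 16 * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_inv, h_invViewMatrix, 12 * sizeof(float), hipMemcpyHostToDevice);

    // One thread is enough: the kernel updates a single small matrix in place.
    hipLaunchKernelGGL(d_addToCurrentTransform, dim3(1), dim3(1), 0, 0, d_cur, d_inv);

    // The kernel only rewrites the first 12 entries (rows 0..2).
    hipMemcpy(h_currentTransform, d_cur, 12 * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_cur);
    hipFree(d_inv);
}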
e220f2bbac8415d9c45719603cfadb001a7b3ea3.cu
#include "includes.h" __global__ void d_addToCurrentTransform(float* d_currentTransform, float* d_invViewMatrix) { float result[12] = {0.f}; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 4; ++j) { for (int k = 0; k < 4; ++k) { result[i * 4 + j] += d_invViewMatrix[i * 4 + k] * d_currentTransform[k * 4 + j]; } } } for (int i = 0; i < 12; ++i) { // The last row of currentTransform remains (0,0,0,1) d_currentTransform[i] = result[i]; } }
c2ecc577d78ac45ff083bfa26d633bd16d7c00e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XUtility.h" #include "ReduceSum.cuh" namespace nts{ // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* use PTX code to reduce float data */ __device__ __forceinline__ float shflDownReduceSum(float input) { float output; asm volatile( "{" ".reg .f32 r0;" "shfl.down.b32 r0, %1, 0x10, 0x1f;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x8, 0xf;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x4, 0x7;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x2, 0x3;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x1, 0x1;" "add.f32 %0, r0, %1;" "}" : "=f"(output) : "f"(input)); return output; } /* use PTX code to reduce int data */ __device__ __forceinline__ int shflDownReduceSum(int input) { int output; asm volatile( "{" ".reg .s32 r0;" "shfl.down.b32 r0, %1, 0x10, 0x1f;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x8, 0xf;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x4, 0x7;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x2, 0x3;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x1, 0x1;" "add.s32 %0, r0, %1;" "}" : "=r"(output) : "r"(input)); return output; } /* reduce a tensor to another that keeps the sum along a dimension - slow version Given a block of data, we go over each dimension i in the stride and we have sum_i = sum_{0<=j<strideNum} exp(input_{i,j} - shift) if isExp == true; = sum_{0<=j<strideNum} input_{i,j} - shift if isExp == false; where we can view the block as a matrix and input_{i,j} represent the item at the crossing of the i-th columne and the j-th row. >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ __global__ void KernelReduceSum(DTYPE * input, DTYPE * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int idx = threadIdx.y * blockDim.x + threadIdx.x; unsigned int i = blockIdx.y*blockDim.y + threadIdx.y; unsigned int j = blockIdx.x*blockDim.x + threadIdx.x; if(i >= stride * blockNum) return; if(threadIdx.x == 0) bias[threadIdx.y] = shift != NULL ? 
shift[i] : 0; __syncthreads(); int k = i / stride; int iOffset = i % stride; bool isValid = (i < stride * blockNum && j < strideNum); DTYPE value = isValid ? input[blockSize * k + stride * j + iOffset] - bias[threadIdx.y] : 0; if(power != (DTYPE)1.0){ if(power == (DTYPE)2.0) value = value * value; else if(power == (DTYPE)0.5) value = sqrt(value); else value = pow(value, power); } if(isExp && isValid) value = exp(value); /* load data into the shared mem */ iData[threadIdx.y * blockDim.x + threadIdx.x] = value; __syncthreads(); /* do reduction in shared mem */ for (unsigned int s = blockDim.x/2; s > 0; s >>= 1){ if (threadIdx.x < s) iData[idx] += iData[idx + s]; __syncthreads(); } /* write result for this block to the output array */ if (threadIdx.x == 0 && blockIdx.x < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.x) * stride + iOffset] = iData[threadIdx.y * blockDim.x]; } /* reduce a tensor to another that keeps the sum along a dimension - slow version This is for float16 reduction. Given a block of data, we go over each dimension i in the stride and we have sum_i = sum_{0<=j<strideNum} exp(input_{i,j} - shift) if isExp == true; = sum_{0<=j<strideNum} input_{i,j} - shift if isExp == false; where we can view the block as a matrix and input_{i,j} represent the item at the crossing of the i-th columne and the j-th row. >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ __global__ void KernelReduceSum(__half * input, __half * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, __half * shift, __half power, bool isExp) { int idx = threadIdx.x * blockDim.y + threadIdx.y; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if(i >= stride * blockNum) return; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; __shared__ __half bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? shift[i] : __half(0); #else __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? __half(shift[i]) : __half(0); #endif __syncthreads(); int k = i / stride; int iOffset = i % stride; bool isValid = (i < stride * blockNum && j < strideNum); #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __half value = isValid ? __hsub(input[blockSize * k + stride * j + iOffset], bias[threadIdx.x]) : __half(0); DTYPE power2 = __half2float(power); if(power2 != (DTYPE)1.0){ if(power2 == (DTYPE)2.0) value = __hmul(value, value); else if(power2 == (DTYPE)0.5) value = hsqrt(value); } if(isExp && isValid) value = hexp(value); #else DTYPE value = isValid ? 
__half2float(input[blockSize * k + stride * j + iOffset]) - __half2float(bias[threadIdx.x]) : 0; DTYPE power2 = __half2float(power); if(power2 != (DTYPE)1.0){ if(power2 == (DTYPE)2.0) value = value * value; else if(power2 == (DTYPE)0.5) value = sqrt(value); else value = pow(value, power2); } if(isExp && isValid) value = exp(value); #endif /* load data into the shared mem */ iData[threadIdx.x * blockDim.y + threadIdx.y] = value; __syncthreads(); /* do reduction in shared mem */ for (unsigned int s = blockDim.y/2; s > 0; s >>= 1){ if (threadIdx.y < s) iData[idx] += iData[idx + s]; __syncthreads(); } #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) /* write result for this block to the output array */ if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y]; #else /* write result for this block to the output array */ if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __half(iData[threadIdx.x * blockDim.y]); #endif } /* reduce a tensor to another that keeps the sum along a dimension - fast version >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ template <unsigned int goodSize> __global__ void KernelReduceSumFast(DTYPE * input, DTYPE * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int j = blockIdx.x * (blockDim.x * 2) + threadIdx.x; unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; if(i >= stride * blockNum) return; if (threadIdx.x == 0) bias[threadIdx.y] = shift != NULL ? shift[i] : 0; __syncthreads(); /* first level reduction */ int k = i / stride; int iOffset = i % stride; bool isValid = j < strideNum; bool isValid2 = j + blockDim.x < strideNum; DTYPE * data = iData + threadIdx.y * blockDim.x; DTYPE * inputData = input + k * blockSize; DTYPE value = isValid ? inputData[j * stride + iOffset] - bias[threadIdx.y]: 0; DTYPE value2 = isValid2 ? 
inputData[(j + blockDim.x) * stride + iOffset] - bias[threadIdx.y]: 0; if(power != (DTYPE)1.0){ if(power == (DTYPE)2.0){ value = value * value; value2 = value2 * value2; } else if(power == (DTYPE)0.5){ value = sqrt(value); value2 = sqrt(value2); } else{ value = pow(value, power); value2 = pow(value2, power); } } if(isExp){ if(isValid) value = exp(value); if(isValid2) value2 = exp(value2); } value = value + value2; __syncthreads(); value = shflDownReduceSum(value); if ((tid & 0x1f) == 0) data[tid / 32] = value; __syncthreads(); if (tid < 32){ if (tid < blockDim.x / 32) value = data[tid]; else value = 0; value = shflDownReduceSum(value); if (tid == 0 && blockIdx.x < reducedStrideNum) { output[(k * reducedStrideNum + blockIdx.x) * stride + iOffset] = value; } } } /* reduce a tensor to another that keeps the sum along a dimension - fast version This is for float16 reduction >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ template <unsigned int goodSize> __global__ void KernelReduceSumFast(__half * input, __half * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, __half * shift, __half power, bool isExp) { unsigned int tid = threadIdx.y; unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= stride * blockNum) return; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ __half bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? shift[i] : __float2half(0); #else __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? __half2float(shift[i]) : 0; #endif __syncthreads(); /* first level reduction */ int k = i / stride; int iOffset = i % stride; bool isValid = j < strideNum; bool isValid2 = j + blockDim.y < strideNum; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __half * data = iData + threadIdx.x * blockDim.y; __half * inputData = input + k * blockSize; __half value = isValid ? __hsub(inputData[j * stride + iOffset], bias[threadIdx.x]) : __float2half(0); __half value2 = isValid2 ? __hsub(inputData[(j + blockDim.y) * stride + iOffset], bias[threadIdx.x]) : __float2half(0); DTYPE powerf = __half2float(power); if(powerf != (DTYPE)1.0){ if(powerf == (DTYPE)2.0){ value = __hmul(value, value); value2 = __hmul(value2, value2); } else if(powerf == (DTYPE)0.5){ value = hsqrt(value); value2 = hsqrt(value2); } } if(isExp){ if(isValid) value = hexp(value); if(isValid2) value2 = hexp(value2); } #else DTYPE * data = iData + threadIdx.x * blockDim.y; __half * inputData = input + k * blockSize; DTYPE value = isValid ? __half2float(inputData[j * stride + iOffset]) - __half2float(bias[threadIdx.x]): 0; DTYPE value2 = isValid2 ? 
__half2float(inputData[(j + blockDim.y) * stride + iOffset]) - __half2float(bias[threadIdx.x]): 0; DTYPE powerf = __half2float(power); if(powerf != (DTYPE)1.0){ if(powerf == (DTYPE)2.0){ value = value * value; value2 = value2 *value2; } else if(powerf == (DTYPE)0.5){ value = sqrt(value); value2 = sqrt(value2); } else{ value = pow(value, powerf); value2 = pow(value2, powerf); } } if(isExp){ if(isValid) value = exp(value); if(isValid2) value2 = exp(value2); } #endif /* load data into the shared mem */ data[tid] = value + value2; __syncthreads(); /* unroll the warp */ if(goodSize >= 512) {if(tid < 256) {data[tid] += data[tid + 256];} __syncthreads();} if(goodSize >= 256) {if(tid < 128) {data[tid] += data[tid + 128];} __syncthreads();} if(goodSize >= 128) {if(tid < 64) {data[tid] += data[tid + 64];} __syncthreads();} if(goodSize >= 64) {if(tid < 32) {data[tid] += data[tid + 32];} __syncthreads();} if(goodSize >= 32) {if(tid < 16) {data[tid] += data[tid + 16];} __syncthreads();} if(goodSize >= 16) {if(tid < 8) {data[tid] += data[tid + 8];} __syncthreads();} if(goodSize >= 8) {if(tid < 4) {data[tid] += data[tid + 4];} __syncthreads();} if(goodSize >= 4) {if(tid < 2) {data[tid] += data[tid + 2];} __syncthreads();} if(goodSize >= 2) {if(tid < 1) {data[tid] += data[tid + 1];} __syncthreads();} #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) /* write result for this block to the output array */ if(threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = data[0]; #else /* write result for this block to the output array */ if(threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __float2half(data[0]); #endif } /* if data storage is discontinuius ,use this way to reduce */ __global__ void KernelReduceSumDiscontinuousStorage(DTYPE * input, DTYPE * output, int stride, int strideNum, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int idx = blockDim.x * blockIdx.x + threadIdx.x; int blockIndex = idx / stride; int offsetInBlock = idx % stride; if (idx >= stride * blockNum) return; bias[idx % blockDim.x] = shift != NULL ? shift[idx] : 0; DTYPE ans = 0; #pragma unroll for (int i = stride * strideNum * blockIndex + offsetInBlock; i < stride * strideNum * blockIndex + offsetInBlock + stride * strideNum; i += stride){ DTYPE value = input[i]; value = value - bias[idx % blockDim.x]; if (power != (DTYPE)1.0) { if (power == (DTYPE)2.0) { value = value * value; } else if (power == (DTYPE)0.5) { value = sqrt(value); } else { value = pow(value, power); } } if (isExp) { value = exp(value); } ans += value; } output[idx] = ans; } __global__ void KernelReduceSumOp(DTYPE * input, DTYPE * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK / 32]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; unsigned int tid = threadIdx.y; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= stride * blockNum) return; if (threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? 
shift[i] : 0; __syncthreads(); /* first level reduction */ int k = i / stride; int iOffset = i % stride; DTYPE threadSum = 0; DTYPE * data = iData + threadIdx.x * blockDim.y; DTYPE * inputData = input + k * blockSize; for (int it = j; it < strideNum; it += blockDim.y){ DTYPE value = inputData[it * stride + iOffset] - bias[threadIdx.x]; if (power != (DTYPE)1.0) { if (power == (DTYPE)2.0) { value = value * value; } else if (power == (DTYPE)0.5) { value = sqrt(value); } else { value = pow(value, power); } } if (isExp) value = exp(value); threadSum += value; } __syncthreads(); threadSum = shflDownReduceSum(threadSum); if ((tid & 0x1f) == 0) { data[tid / 32] = threadSum; } __syncthreads(); if (tid < 32){ if (tid < blockDim.y / 32) threadSum = data[tid]; else threadSum = 0; threadSum = shflDownReduceSum(threadSum); if (tid == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = threadSum; } } __global__ void KernelReduceSumOpLessBlocks(DTYPE * input, DTYPE * output, int strideNum, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int idx = threadIdx.x % 32; int idy = (blockIdx.x * blockDim.x + threadIdx.x) / 32; if (idx == 0) bias[threadIdx.x / 32] = shift != NULL ? shift[idy] : 0; int startIndex = idy * strideNum; DTYPE threadSum = 0; for (int i = idx; i < strideNum; i += 32) { DTYPE value = input[startIndex + i] - bias[threadIdx.x / 32]; if (power != (DTYPE)1.0) { if (power == (DTYPE)2.0) { value = value * value; } else if (power == (DTYPE)0.5) { value = sqrt(value); } else { value = pow(value, power); } } if (isExp) value = exp(value); threadSum += value; } threadSum = shflDownReduceSum(threadSum); if (idx == 0) output[idy] = threadSum; } /* according the GPU's sm number allocation warp num */ inline void continuousStorageThreadAllocation(dim3& grid, dim3& block, long long vectorNum, int vectorSize) { int warpNum = 4; if (vectorNum < 20 * 8) { warpNum = 8; if (vectorNum < 20 * 4) { warpNum = 16; if (warpNum < 20 * 2) warpNum = 32; } } int minWarpNum = vectorSize / 32; if (vectorSize % 32 != 0) minWarpNum++; warpNum = min(warpNum, minWarpNum); grid.x = (unsigned int)vectorNum; grid.y = 1; grid.z = 1; block.x = 1; block.y = warpNum * 32; block.z = 1; } /* this situation we use block.x * grid.x deal one vector for continuous read */ void discontinuousStorageNoShareMemThreadAllocation(dim3* grid, dim3* block, int stride, int blockNum) { block->x = 512; block->y = 1; if ((stride * blockNum) % 512 == 0) grid->x = (stride * blockNum) / 512; else grid->x = (stride * blockNum) / 512 + 1; grid->y = 1; } /* adjust threads.x number then we can use warp optimization */ void adjustThreadForUseWarpOptimization(dim3* blocks, dim3* threads) { if (threads->y > 1){ blocks->y *= threads->y; threads->y = 1; } if (threads->x < 32) threads->x = 32; } /* sum the items along a dimension of the tensor (cuda version). 
For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^power if isExp == false sum = \sum_i exp((a_i - shift)^power) if isExp == true >> input - the input tensor >> output - the output tensor >> dim - which dimension to reduce >> shift - the bias on the input >> power - we perform pow(item_i, power) on each item >> ieExp - specify if the exp() is performed */ void _CudaReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor * shift, DTYPE power, bool isExp) { CheckNTErrors(input && output, "Empty input or output tensors!"); CheckNTErrors(input->order == output->order + 1, "Incorrect tensor sizes!"); CheckNTErrors(input->order > dim && dim >= 0, "Illegal dimension to reduce!"); CheckNTErrors(input->dataType == output->dataType, "Unmatched data types!"); CheckNTErrors(shift == NULL || output->unitNum == shift->unitNum, "Incorrect shift tensor size!"); int dimRDI = input->order - dim - 1; for(int i = 0; i < input->order; i++){ if(i < dimRDI){ CheckNTErrors(input->dimSizeRDI[i] == output->dimSizeRDI[i], "Unmatched tensors!"); } else if(i > dimRDI){ CheckNTErrors(input->dimSizeRDI[i] == output->dimSizeRDI[i - 1], "Unmatched tensors!"); } } if(input->dataType == X_FLOAT16) CheckNTErrors(power == 0 || power == 0.5 || power == 1.0 || power == 2.0, "TODO!"); int cudaGridSize[3]; int cudaBlockSize[3]; int iter = 0; int stride = 1; int strideNum = input->dimSizeRDI[dimRDI]; int blockSize = 1; int blockNum = 1; for (int i = 0; i < input->order; i++) { if (i < dimRDI) stride *= input->dimSizeRDI[i]; else if (i > dimRDI) blockNum *= input->dimSizeRDI[i]; } blockSize = stride * strideNum; int devID = input->devID; XMem * mem = input->mem; GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); int bufSize = input->unitSize * cudaGridSize[0] * stride * blockNum * 2; DTYPE * buf = mem != NULL ? (DTYPE*)mem->AllocBuf(mem->devID, bufSize) : (DTYPE*)XMemAlloc(input->devID, bufSize); DTYPE * buf1 = buf; DTYPE * buf2 = buf + cudaGridSize[0] * stride * blockNum; DTYPE * sp = shift != NULL ? 
(DTYPE*)shift->data : NULL; int devIDBackup; ProtectCudaDev(input->devID, devIDBackup); if (stride == 1 && blockNum >= 10) { dim3 grids; dim3 blocks; continuousStorageThreadAllocation(grids, blocks, (long long)blockNum, strideNum); if (blocks.y >= 128) hipLaunchKernelGGL(( KernelReduceSumOp) , dim3(grids), dim3(blocks), 0, 0, (DTYPE *)input->data, (DTYPE*)output->data, stride, strideNum, grids.y, blockSize, blockNum, sp, power, isExp); else { if (blockNum % 4 != 0) blockNum = (int)(blockNum / 4) + 1; else blockNum = blockNum / 4; hipLaunchKernelGGL(( KernelReduceSumOpLessBlocks) , dim3(blockNum), dim3(128), 0, 0, (DTYPE *)input->data, (DTYPE*)output->data, strideNum, blockNum, sp, power, isExp); } } else if (stride != 1 && stride * blockNum > 4096){ //GDevs->GetGridAndBlockSize2D(devID, stride * blockNum, strideNum,MAX_INT, cudaGridSize, cudaBlockSize); //unsigned int* goutput = (unsigned int *)input->data; //convert2uintV2 << <dim3(cudaGridSize[0], cudaGridSize[1]), dim3(cudaBlockSize[0], cudaBlockSize[1]) >> > ((float*)input->data, goutput, stride, strideNum, blockNum, strideNum*blockNum*stride); dim3 grid, block; discontinuousStorageNoShareMemThreadAllocation(&grid, &block, stride, blockNum); hipLaunchKernelGGL(( KernelReduceSumDiscontinuousStorage) , dim3(grid), dim3(block), 0, 0, (DTYPE *)input->data, (DTYPE*)output->data, stride, strideNum, blockNum,sp, power, isExp); } else { do { if (input->dataType == DEFAULT_DTYPE) { DTYPE * iData = NULL; DTYPE * oData = NULL; if (iter == 0) { iData = (DTYPE*)input->data; oData = buf1; } else if (iter % 2 == 1) { iData = buf1; oData = buf2; } else { iData = buf2; oData = buf1; } /* unroll the reduction procedure. The code is messy but it is faster. */ if (strideNum <= 32) { GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; hipLaunchKernelGGL(( KernelReduceSum) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else if (strideNum < 128) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 64), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); hipLaunchKernelGGL(( KernelReduceSumFast<64>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else if (strideNum < 256) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 128), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); hipLaunchKernelGGL(( KernelReduceSumFast<128>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else if (strideNum < 512) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], 
cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 256), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); hipLaunchKernelGGL(( KernelReduceSumFast<256>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 512), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); hipLaunchKernelGGL(( KernelReduceSumFast<512>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } } else if (input->dataType == X_FLOAT16) { __half * buf1ft16 = (__half *)buf1; __half * buf2ft16 = (__half *)buf2; __half * spft16 = (__half *)sp; unsigned short power2 = FloatToFloat16(power); __half * powerft16p = (__half*)&power2; __half * iData = NULL; __half * oData = NULL; if (iter == 0) { iData = (__half*)input->data; oData = buf1ft16; } else if (iter % 2 == 1) { iData = buf1ft16; oData = buf2ft16; } else { iData = buf2ft16; oData = buf1ft16; } /* unroll the reduction procedure. The code is messy but it is faster. */ if (strideNum < 32) { GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; hipLaunchKernelGGL(( KernelReduceSum) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else if (strideNum < 128) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 64), "Incorrect thread number when calling the cuda kernel!"); hipLaunchKernelGGL(( KernelReduceSumFast<64>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else if (strideNum < 256) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 128), "Incorrect thread number when calling the cuda kernel!"); hipLaunchKernelGGL(( KernelReduceSumFast<128>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else if (strideNum < 512) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 256), "Incorrect thread number when calling the cuda kernel!"); 
hipLaunchKernelGGL(( KernelReduceSumFast<256>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 512), "Incorrect thread number when calling the cuda kernel!"); hipLaunchKernelGGL(( KernelReduceSumFast<512>) , dim3(blocks), dim3(threads), 0, 0, iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } } strideNum = cudaGridSize[0]; blockSize = cudaGridSize[0]; sp = NULL; power = (DTYPE)1.0; isExp = false; iter++; } while (strideNum > 1); } ProtectCudaDev(input->devID, devIDBackup); if (mem != NULL) mem->ReleaseBuf(mem->devID, bufSize); else XMemFree(input->devID, buf); } #endif // USE_ROCM } // namespace nts(NiuTrans.Tensor)
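Note that the hipified ReduceSum above keeps the NVIDIA-specific pieces (inline shfl.down PTX and __CUDA_ARCH__ checks) under the renamed USE_ROCM guard, so it still only compiles for NVIDIA targets; an AMD wavefront would need a different reduction path. On CUDA 9 and newer, the same full-warp sum can also be written with the warp-synchronous intrinsic instead of PTX (a sketch assuming all 32 lanes participate):

// Equivalent of shflDownReduceSum for a full, converged 32-lane warp:
// after the loop, lane 0 holds the sum of all lanes' inputs.
__device__ __forceinline__ float warpReduceSumSync(float v)
{
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    return v;
}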
c2ecc577d78ac45ff083bfa26d633bd16d7c00e9.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XUtility.h" #include "ReduceSum.cuh" namespace nts{ // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* use PTX code to reduce float data */ __device__ __forceinline__ float shflDownReduceSum(float input) { float output; asm volatile( "{" ".reg .f32 r0;" "shfl.down.b32 r0, %1, 0x10, 0x1f;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x8, 0xf;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x4, 0x7;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x2, 0x3;" "add.f32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x1, 0x1;" "add.f32 %0, r0, %1;" "}" : "=f"(output) : "f"(input)); return output; } /* use PTX code to reduce int data */ __device__ __forceinline__ int shflDownReduceSum(int input) { int output; asm volatile( "{" ".reg .s32 r0;" "shfl.down.b32 r0, %1, 0x10, 0x1f;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x8, 0xf;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x4, 0x7;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x2, 0x3;" "add.s32 %1, r0, %1;" "shfl.down.b32 r0, %1, 0x1, 0x1;" "add.s32 %0, r0, %1;" "}" : "=r"(output) : "r"(input)); return output; } /* reduce a tensor to another that keeps the sum along a dimension - slow version Given a block of data, we go over each dimension i in the stride and we have sum_i = sum_{0<=j<strideNum} exp(input_{i,j} - shift) if isExp == true; = sum_{0<=j<strideNum} input_{i,j} - shift if isExp == false; where we can view the block as a matrix and input_{i,j} represent the item at the crossing of the i-th columne and the j-th row. >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ __global__ void KernelReduceSum(DTYPE * input, DTYPE * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int idx = threadIdx.y * blockDim.x + threadIdx.x; unsigned int i = blockIdx.y*blockDim.y + threadIdx.y; unsigned int j = blockIdx.x*blockDim.x + threadIdx.x; if(i >= stride * blockNum) return; if(threadIdx.x == 0) bias[threadIdx.y] = shift != NULL ? 
shift[i] : 0; __syncthreads(); int k = i / stride; int iOffset = i % stride; bool isValid = (i < stride * blockNum && j < strideNum); DTYPE value = isValid ? input[blockSize * k + stride * j + iOffset] - bias[threadIdx.y] : 0; if(power != (DTYPE)1.0){ if(power == (DTYPE)2.0) value = value * value; else if(power == (DTYPE)0.5) value = sqrt(value); else value = pow(value, power); } if(isExp && isValid) value = exp(value); /* load data into the shared mem */ iData[threadIdx.y * blockDim.x + threadIdx.x] = value; __syncthreads(); /* do reduction in shared mem */ for (unsigned int s = blockDim.x/2; s > 0; s >>= 1){ if (threadIdx.x < s) iData[idx] += iData[idx + s]; __syncthreads(); } /* write result for this block to the output array */ if (threadIdx.x == 0 && blockIdx.x < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.x) * stride + iOffset] = iData[threadIdx.y * blockDim.x]; } /* reduce a tensor to another that keeps the sum along a dimension - slow version This is for float16 reduction. Given a block of data, we go over each dimension i in the stride and we have sum_i = sum_{0<=j<strideNum} exp(input_{i,j} - shift) if isExp == true; = sum_{0<=j<strideNum} input_{i,j} - shift if isExp == false; where we can view the block as a matrix and input_{i,j} represent the item at the crossing of the i-th columne and the j-th row. >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ __global__ void KernelReduceSum(__half * input, __half * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, __half * shift, __half power, bool isExp) { int idx = threadIdx.x * blockDim.y + threadIdx.y; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; if(i >= stride * blockNum) return; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; __shared__ __half bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? shift[i] : __half(0); #else __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? __half(shift[i]) : __half(0); #endif __syncthreads(); int k = i / stride; int iOffset = i % stride; bool isValid = (i < stride * blockNum && j < strideNum); #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __half value = isValid ? __hsub(input[blockSize * k + stride * j + iOffset], bias[threadIdx.x]) : __half(0); DTYPE power2 = __half2float(power); if(power2 != (DTYPE)1.0){ if(power2 == (DTYPE)2.0) value = __hmul(value, value); else if(power2 == (DTYPE)0.5) value = hsqrt(value); } if(isExp && isValid) value = hexp(value); #else DTYPE value = isValid ? 
__half2float(input[blockSize * k + stride * j + iOffset]) - __half2float(bias[threadIdx.x]) : 0; DTYPE power2 = __half2float(power); if(power2 != (DTYPE)1.0){ if(power2 == (DTYPE)2.0) value = value * value; else if(power2 == (DTYPE)0.5) value = sqrt(value); else value = pow(value, power2); } if(isExp && isValid) value = exp(value); #endif /* load data into the shared mem */ iData[threadIdx.x * blockDim.y + threadIdx.y] = value; __syncthreads(); /* do reduction in shared mem */ for (unsigned int s = blockDim.y/2; s > 0; s >>= 1){ if (threadIdx.y < s) iData[idx] += iData[idx + s]; __syncthreads(); } #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) /* write result for this block to the output array */ if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y]; #else /* write result for this block to the output array */ if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __half(iData[threadIdx.x * blockDim.y]); #endif } /* reduce a tensor to another that keeps the sum along a dimension - fast version >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ template <unsigned int goodSize> __global__ void KernelReduceSumFast(DTYPE * input, DTYPE * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; unsigned int tid = threadIdx.x; unsigned int j = blockIdx.x * (blockDim.x * 2) + threadIdx.x; unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; if(i >= stride * blockNum) return; if (threadIdx.x == 0) bias[threadIdx.y] = shift != NULL ? shift[i] : 0; __syncthreads(); /* first level reduction */ int k = i / stride; int iOffset = i % stride; bool isValid = j < strideNum; bool isValid2 = j + blockDim.x < strideNum; DTYPE * data = iData + threadIdx.y * blockDim.x; DTYPE * inputData = input + k * blockSize; DTYPE value = isValid ? inputData[j * stride + iOffset] - bias[threadIdx.y]: 0; DTYPE value2 = isValid2 ? 
inputData[(j + blockDim.x) * stride + iOffset] - bias[threadIdx.y]: 0; if(power != (DTYPE)1.0){ if(power == (DTYPE)2.0){ value = value * value; value2 = value2 * value2; } else if(power == (DTYPE)0.5){ value = sqrt(value); value2 = sqrt(value2); } else{ value = pow(value, power); value2 = pow(value2, power); } } if(isExp){ if(isValid) value = exp(value); if(isValid2) value2 = exp(value2); } value = value + value2; __syncthreads(); value = shflDownReduceSum(value); if ((tid & 0x1f) == 0) data[tid / 32] = value; __syncthreads(); if (tid < 32){ if (tid < blockDim.x / 32) value = data[tid]; else value = 0; value = shflDownReduceSum(value); if (tid == 0 && blockIdx.x < reducedStrideNum) { output[(k * reducedStrideNum + blockIdx.x) * stride + iOffset] = value; } } } /* reduce a tensor to another that keeps the sum along a dimension - fast version This is for float16 reduction >> input - the input array (representing a tensor) >> output - the sum over each block. NOTE: output is also an array >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to finish the reduce >> reducedStrideNum - the number of strides after reducation >> blockSize - size of the block (i.e., stride * strideNum) >> blockNum - how many blocks >> shift - the bias imposed on the input >> power - power of the item in the array >> isExp - specify if we perform exp() on the input */ template <unsigned int goodSize> __global__ void KernelReduceSumFast(__half * input, __half * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, __half * shift, __half power, bool isExp) { unsigned int tid = threadIdx.y; unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= stride * blockNum) return; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ __half bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? shift[i] : __float2half(0); #else __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; if(threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? __half2float(shift[i]) : 0; #endif __syncthreads(); /* first level reduction */ int k = i / stride; int iOffset = i % stride; bool isValid = j < strideNum; bool isValid2 = j + blockDim.y < strideNum; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) __half * data = iData + threadIdx.x * blockDim.y; __half * inputData = input + k * blockSize; __half value = isValid ? __hsub(inputData[j * stride + iOffset], bias[threadIdx.x]) : __float2half(0); __half value2 = isValid2 ? __hsub(inputData[(j + blockDim.y) * stride + iOffset], bias[threadIdx.x]) : __float2half(0); DTYPE powerf = __half2float(power); if(powerf != (DTYPE)1.0){ if(powerf == (DTYPE)2.0){ value = __hmul(value, value); value2 = __hmul(value2, value2); } else if(powerf == (DTYPE)0.5){ value = hsqrt(value); value2 = hsqrt(value2); } } if(isExp){ if(isValid) value = hexp(value); if(isValid2) value2 = hexp(value2); } #else DTYPE * data = iData + threadIdx.x * blockDim.y; __half * inputData = input + k * blockSize; DTYPE value = isValid ? __half2float(inputData[j * stride + iOffset]) - __half2float(bias[threadIdx.x]): 0; DTYPE value2 = isValid2 ? 
__half2float(inputData[(j + blockDim.y) * stride + iOffset]) - __half2float(bias[threadIdx.x]): 0; DTYPE powerf = __half2float(power); if(powerf != (DTYPE)1.0){ if(powerf == (DTYPE)2.0){ value = value * value; value2 = value2 *value2; } else if(powerf == (DTYPE)0.5){ value = sqrt(value); value2 = sqrt(value2); } else{ value = pow(value, powerf); value2 = pow(value2, powerf); } } if(isExp){ if(isValid) value = exp(value); if(isValid2) value2 = exp(value2); } #endif /* load data into the shared mem */ data[tid] = value + value2; __syncthreads(); /* unroll the warp */ if(goodSize >= 512) {if(tid < 256) {data[tid] += data[tid + 256];} __syncthreads();} if(goodSize >= 256) {if(tid < 128) {data[tid] += data[tid + 128];} __syncthreads();} if(goodSize >= 128) {if(tid < 64) {data[tid] += data[tid + 64];} __syncthreads();} if(goodSize >= 64) {if(tid < 32) {data[tid] += data[tid + 32];} __syncthreads();} if(goodSize >= 32) {if(tid < 16) {data[tid] += data[tid + 16];} __syncthreads();} if(goodSize >= 16) {if(tid < 8) {data[tid] += data[tid + 8];} __syncthreads();} if(goodSize >= 8) {if(tid < 4) {data[tid] += data[tid + 4];} __syncthreads();} if(goodSize >= 4) {if(tid < 2) {data[tid] += data[tid + 2];} __syncthreads();} if(goodSize >= 2) {if(tid < 1) {data[tid] += data[tid + 1];} __syncthreads();} #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) /* write result for this block to the output array */ if(threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = data[0]; #else /* write result for this block to the output array */ if(threadIdx.y == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __float2half(data[0]); #endif } /* if data storage is discontinuius ,use this way to reduce */ __global__ void KernelReduceSumDiscontinuousStorage(DTYPE * input, DTYPE * output, int stride, int strideNum, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int idx = blockDim.x * blockIdx.x + threadIdx.x; int blockIndex = idx / stride; int offsetInBlock = idx % stride; if (idx >= stride * blockNum) return; bias[idx % blockDim.x] = shift != NULL ? shift[idx] : 0; DTYPE ans = 0; #pragma unroll for (int i = stride * strideNum * blockIndex + offsetInBlock; i < stride * strideNum * blockIndex + offsetInBlock + stride * strideNum; i += stride){ DTYPE value = input[i]; value = value - bias[idx % blockDim.x]; if (power != (DTYPE)1.0) { if (power == (DTYPE)2.0) { value = value * value; } else if (power == (DTYPE)0.5) { value = sqrt(value); } else { value = pow(value, power); } } if (isExp) { value = exp(value); } ans += value; } output[idx] = ans; } __global__ void KernelReduceSumOp(DTYPE * input, DTYPE * output, int stride, int strideNum, int reducedStrideNum, int blockSize, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK / 32]; __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; unsigned int tid = threadIdx.y; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= stride * blockNum) return; if (threadIdx.y == 0) bias[threadIdx.x] = shift != NULL ? 
shift[i] : 0; __syncthreads(); /* first level reduction */ int k = i / stride; int iOffset = i % stride; DTYPE threadSum = 0; DTYPE * data = iData + threadIdx.x * blockDim.y; DTYPE * inputData = input + k * blockSize; for (int it = j; it < strideNum; it += blockDim.y){ DTYPE value = inputData[it * stride + iOffset] - bias[threadIdx.x]; if (power != (DTYPE)1.0) { if (power == (DTYPE)2.0) { value = value * value; } else if (power == (DTYPE)0.5) { value = sqrt(value); } else { value = pow(value, power); } } if (isExp) value = exp(value); threadSum += value; } __syncthreads(); threadSum = shflDownReduceSum(threadSum); if ((tid & 0x1f) == 0) { data[tid / 32] = threadSum; } __syncthreads(); if (tid < 32){ if (tid < blockDim.y / 32) threadSum = data[tid]; else threadSum = 0; threadSum = shflDownReduceSum(threadSum); if (tid == 0 && blockIdx.y < reducedStrideNum) output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = threadSum; } } __global__ void KernelReduceSumOpLessBlocks(DTYPE * input, DTYPE * output, int strideNum, int blockNum, DTYPE * shift, DTYPE power, bool isExp) { __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; int idx = threadIdx.x % 32; int idy = (blockIdx.x * blockDim.x + threadIdx.x) / 32; if (idx == 0) bias[threadIdx.x / 32] = shift != NULL ? shift[idy] : 0; int startIndex = idy * strideNum; DTYPE threadSum = 0; for (int i = idx; i < strideNum; i += 32) { DTYPE value = input[startIndex + i] - bias[threadIdx.x / 32]; if (power != (DTYPE)1.0) { if (power == (DTYPE)2.0) { value = value * value; } else if (power == (DTYPE)0.5) { value = sqrt(value); } else { value = pow(value, power); } } if (isExp) value = exp(value); threadSum += value; } threadSum = shflDownReduceSum(threadSum); if (idx == 0) output[idy] = threadSum; } /* according the GPU's sm number allocation warp num */ inline void continuousStorageThreadAllocation(dim3& grid, dim3& block, long long vectorNum, int vectorSize) { int warpNum = 4; if (vectorNum < 20 * 8) { warpNum = 8; if (vectorNum < 20 * 4) { warpNum = 16; if (warpNum < 20 * 2) warpNum = 32; } } int minWarpNum = vectorSize / 32; if (vectorSize % 32 != 0) minWarpNum++; warpNum = min(warpNum, minWarpNum); grid.x = (unsigned int)vectorNum; grid.y = 1; grid.z = 1; block.x = 1; block.y = warpNum * 32; block.z = 1; } /* this situation we use block.x * grid.x deal one vector for continuous read */ void discontinuousStorageNoShareMemThreadAllocation(dim3* grid, dim3* block, int stride, int blockNum) { block->x = 512; block->y = 1; if ((stride * blockNum) % 512 == 0) grid->x = (stride * blockNum) / 512; else grid->x = (stride * blockNum) / 512 + 1; grid->y = 1; } /* adjust threads.x number then we can use warp optimization */ void adjustThreadForUseWarpOptimization(dim3* blocks, dim3* threads) { if (threads->y > 1){ blocks->y *= threads->y; threads->y = 1; } if (threads->x < 32) threads->x = 32; } /* sum the items along a dimension of the tensor (cuda version). 
For a 1-dimensional data array a, sum = \sum_i (a_i - shift)^power if isExp == false sum = \sum_i exp((a_i - shift)^power) if isExp == true >> input - the input tensor >> output - the output tensor >> dim - which dimension to reduce >> shift - the bias on the input >> power - we perform pow(item_i, power) on each item >> ieExp - specify if the exp() is performed */ void _CudaReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor * shift, DTYPE power, bool isExp) { CheckNTErrors(input && output, "Empty input or output tensors!"); CheckNTErrors(input->order == output->order + 1, "Incorrect tensor sizes!"); CheckNTErrors(input->order > dim && dim >= 0, "Illegal dimension to reduce!"); CheckNTErrors(input->dataType == output->dataType, "Unmatched data types!"); CheckNTErrors(shift == NULL || output->unitNum == shift->unitNum, "Incorrect shift tensor size!"); int dimRDI = input->order - dim - 1; for(int i = 0; i < input->order; i++){ if(i < dimRDI){ CheckNTErrors(input->dimSizeRDI[i] == output->dimSizeRDI[i], "Unmatched tensors!"); } else if(i > dimRDI){ CheckNTErrors(input->dimSizeRDI[i] == output->dimSizeRDI[i - 1], "Unmatched tensors!"); } } if(input->dataType == X_FLOAT16) CheckNTErrors(power == 0 || power == 0.5 || power == 1.0 || power == 2.0, "TODO!"); int cudaGridSize[3]; int cudaBlockSize[3]; int iter = 0; int stride = 1; int strideNum = input->dimSizeRDI[dimRDI]; int blockSize = 1; int blockNum = 1; for (int i = 0; i < input->order; i++) { if (i < dimRDI) stride *= input->dimSizeRDI[i]; else if (i > dimRDI) blockNum *= input->dimSizeRDI[i]; } blockSize = stride * strideNum; int devID = input->devID; XMem * mem = input->mem; GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); int bufSize = input->unitSize * cudaGridSize[0] * stride * blockNum * 2; DTYPE * buf = mem != NULL ? (DTYPE*)mem->AllocBuf(mem->devID, bufSize) : (DTYPE*)XMemAlloc(input->devID, bufSize); DTYPE * buf1 = buf; DTYPE * buf2 = buf + cudaGridSize[0] * stride * blockNum; DTYPE * sp = shift != NULL ? 
(DTYPE*)shift->data : NULL; int devIDBackup; ProtectCudaDev(input->devID, devIDBackup); if (stride == 1 && blockNum >= 10) { dim3 grids; dim3 blocks; continuousStorageThreadAllocation(grids, blocks, (long long)blockNum, strideNum); if (blocks.y >= 128) KernelReduceSumOp <<<grids, blocks>>> ((DTYPE *)input->data, (DTYPE*)output->data, stride, strideNum, grids.y, blockSize, blockNum, sp, power, isExp); else { if (blockNum % 4 != 0) blockNum = (int)(blockNum / 4) + 1; else blockNum = blockNum / 4; KernelReduceSumOpLessBlocks <<<blockNum, 128>>> ((DTYPE *)input->data, (DTYPE*)output->data, strideNum, blockNum, sp, power, isExp); } } else if (stride != 1 && stride * blockNum > 4096){ //GDevs->GetGridAndBlockSize2D(devID, stride * blockNum, strideNum,MAX_INT, cudaGridSize, cudaBlockSize); //unsigned int* goutput = (unsigned int *)input->data; //convert2uintV2 << <dim3(cudaGridSize[0], cudaGridSize[1]), dim3(cudaBlockSize[0], cudaBlockSize[1]) >> > ((float*)input->data, goutput, stride, strideNum, blockNum, strideNum*blockNum*stride); dim3 grid, block; discontinuousStorageNoShareMemThreadAllocation(&grid, &block, stride, blockNum); KernelReduceSumDiscontinuousStorage <<<grid, block>>> ((DTYPE *)input->data, (DTYPE*)output->data, stride, strideNum, blockNum,sp, power, isExp); } else { do { if (input->dataType == DEFAULT_DTYPE) { DTYPE * iData = NULL; DTYPE * oData = NULL; if (iter == 0) { iData = (DTYPE*)input->data; oData = buf1; } else if (iter % 2 == 1) { iData = buf1; oData = buf2; } else { iData = buf2; oData = buf1; } /* unroll the reduction procedure. The code is messy but it is faster. */ if (strideNum <= 32) { GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; KernelReduceSum <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else if (strideNum < 128) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 64), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); KernelReduceSumFast<64> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else if (strideNum < 256) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 128), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); KernelReduceSumFast<128> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else if (strideNum < 512) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 256), "Incorrect thread number when calling the cuda kernel!"); 
adjustThreadForUseWarpOptimization(&blocks, &threads); KernelReduceSumFast<256> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } else { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); if (cudaGridSize[0] == 1) oData = (DTYPE*)output->data; CheckNTErrors((cudaBlockSize[0] >= 512), "Incorrect thread number when calling the cuda kernel!"); adjustThreadForUseWarpOptimization(&blocks, &threads); KernelReduceSumFast<512> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.x, blockSize, blockNum, sp, power, isExp); } } else if (input->dataType == X_FLOAT16) { __half * buf1ft16 = (__half *)buf1; __half * buf2ft16 = (__half *)buf2; __half * spft16 = (__half *)sp; unsigned short power2 = FloatToFloat16(power); __half * powerft16p = (__half*)&power2; __half * iData = NULL; __half * oData = NULL; if (iter == 0) { iData = (__half*)input->data; oData = buf1ft16; } else if (iter % 2 == 1) { iData = buf1ft16; oData = buf2ft16; } else { iData = buf2ft16; oData = buf1ft16; } /* unroll the reduction procedure. The code is messy but it is faster. */ if (strideNum < 32) { GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; KernelReduceSum <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else if (strideNum < 128) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 64), "Incorrect thread number when calling the cuda kernel!"); KernelReduceSumFast<64> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else if (strideNum < 256) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 128), "Incorrect thread number when calling the cuda kernel!"); KernelReduceSumFast<128> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else if (strideNum < 512) { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; CheckNTErrors((cudaBlockSize[0] >= 256), "Incorrect thread number when calling the cuda kernel!"); KernelReduceSumFast<256> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } else { GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); if (cudaGridSize[0] == 1) oData = (__half*)output->data; 
CheckNTErrors((cudaBlockSize[0] >= 512), "Incorrect thread number when calling the cuda kernel!"); KernelReduceSumFast<512> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum, spft16, *powerft16p, isExp); } } strideNum = cudaGridSize[0]; blockSize = cudaGridSize[0]; sp = NULL; power = (DTYPE)1.0; isExp = false; iter++; } while (strideNum > 1); } ProtectCudaDev(input->devID, devIDBackup); if (mem != NULL) mem->ReleaseBuf(mem->devID, bufSize); else XMemFree(input->devID, buf); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
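The reduction kernels in this file call a shflDownReduceSum() helper that is defined elsewhere in the NiuTrans.Tensor sources, so it does not appear in this excerpt. For orientation only, a warp-wide shuffle-down sum is commonly written like the sketch below; the name warpReduceSum and the float-only signature are assumptions, not the project's actual helper.

__device__ __forceinline__ float warpReduceSum(float value)
{
    // Each step folds the upper half of the active lanes onto the lower half;
    // after five steps lane 0 holds the sum of all 32 lanes in the warp.
    for (int offset = 16; offset > 0; offset >>= 1)
        value += __shfl_down_sync(0xffffffffu, value, offset);
    return value;
}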
7a60070deb2bcb249c859f28ac5c620c804a2a30.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> const int N = 1 << 20; #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, float *c_d) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); if(tid < N) { c_d[tid] = a_d[tid] + b_d[tid]; } } int main( int argc, char **argv) { int num_streams = 8; if(argc == 2) num_streams = atoi(argv[1]); int num_threads = num_streams; printf("\nrunning %d cuda streams (and threads)\n", num_streams); // allocate streams hipStream_t *streams = (hipStream_t *) malloc(num_streams * sizeof(hipStream_t)); // init for (int i = 0; i < num_streams; i++) { checkCudaErrors(hipStreamCreate(&(streams[i]))); } //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; float *a_h = (float*) malloc ( N * num_streams * FLTSIZE); float *b_h = (float*) malloc ( N * num_streams * FLTSIZE); float *c_h = (float*) malloc ( N * num_streams * FLTSIZE); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; hipMalloc((void**)&a_d, N * num_streams * FLTSIZE); hipMalloc((void**)&b_d, N * num_streams * FLTSIZE); hipMalloc((void**)&c_d, N * num_streams * FLTSIZE); omp_set_num_threads(num_threads); #pragma omp parallel { checkCudaErrors(hipSetDevice(0)); int id = omp_get_thread_num(); size_t offset = id * N; // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // copy data to deivce hipMemcpyAsync(a_d + offset, a_h + offset, databytes, hipMemcpyHostToDevice, streams[id]); hipMemcpyAsync(b_d + offset, b_h + offset, databytes, hipMemcpyHostToDevice, streams[id]); // launch one worker kernel per stream hipLaunchKernelGGL(( kernel_vectorAdd) , dim3(blocks), dim3(threads), 0, streams[id] , &a_d[offset], &b_d[offset], N, &c_d[offset]); // copy data back to host hipMemcpyAsync(c_h + offset, c_d + offset, databytes, hipMemcpyDeviceToHost, streams[id]); } // check data bool success = 1; for(int i=0; i< N * num_streams; i++) { if (abs(c_h[i] - 3.3f) > 1e-6) { fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); success = 0; break; } } if(success) { printf("\nSuccess!\nExit.\n"); } //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// for (int i = 0; i < num_streams; i++) { checkCudaErrors(hipStreamDestroy(streams[i])); } free(a_h); free(b_h); free(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); hipDeviceReset(); return 0; }
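Both the HIP and CUDA versions of this vector-add kernel compute the global thread id with the legacy 24-bit multiply intrinsic __mul24. That is safe here because blockIdx.x * blockDim.x stays far below 2^24 for N = 1 << 20 and 256-thread blocks, but on current GPUs a plain 32-bit multiply is generally no slower. A CUDA sketch of the equivalent index computation, shown only for comparison:

__global__ void kernel_vectorAdd_plain(const float* __restrict__ a_d,
                                       const float* __restrict__ b_d,
                                       const int n, float* c_d)
{
    // Ordinary 32-bit index arithmetic; equivalent to the __mul24 form at these sizes.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        c_d[tid] = a_d[tid] + b_d[tid];
}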
7a60070deb2bcb249c859f28ac5c620c804a2a30.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <cuda_runtime.h> #include <helper_cuda.h> const int N = 1 << 20; #define FLTSIZE sizeof(float) inline int BLK(int data, int blocksize) { return (data + blocksize - 1) / blocksize; } __global__ void kernel_vectorAdd (const float* __restrict__ a_d, const float* __restrict__ b_d, const int N, float *c_d) { int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); if(tid < N) { c_d[tid] = a_d[tid] + b_d[tid]; } } int main( int argc, char **argv) { int num_streams = 8; if(argc == 2) num_streams = atoi(argv[1]); int num_threads = num_streams; printf("\nrunning %d cuda streams (and threads)\n", num_streams); // allocate streams cudaStream_t *streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t)); // init for (int i = 0; i < num_streams; i++) { checkCudaErrors(cudaStreamCreate(&(streams[i]))); } //------------------------------------------------------------------------// // allocate data on the host //------------------------------------------------------------------------// size_t databytes = N * FLTSIZE; float *a_h = (float*) malloc ( N * num_streams * FLTSIZE); float *b_h = (float*) malloc ( N * num_streams * FLTSIZE); float *c_h = (float*) malloc ( N * num_streams * FLTSIZE); for(int i=0; i< N * num_streams; i++) { a_h[i] = 1.1f; b_h[i] = 2.2f; } //------------------------------------------------------------------------// // allocate data on the device //------------------------------------------------------------------------// float *a_d; float *b_d; float *c_d; cudaMalloc((void**)&a_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&b_d, N * num_streams * FLTSIZE); cudaMalloc((void**)&c_d, N * num_streams * FLTSIZE); omp_set_num_threads(num_threads); #pragma omp parallel { checkCudaErrors(cudaSetDevice(0)); int id = omp_get_thread_num(); size_t offset = id * N; // kernel configuration dim3 threads = dim3(256, 1, 1); dim3 blocks = dim3(BLK(N, threads.x), 1, 1); // copy data to deivce cudaMemcpyAsync(a_d + offset, a_h + offset, databytes, cudaMemcpyHostToDevice, streams[id]); cudaMemcpyAsync(b_d + offset, b_h + offset, databytes, cudaMemcpyHostToDevice, streams[id]); // launch one worker kernel per stream kernel_vectorAdd <<< blocks, threads, 0, streams[id] >>> (&a_d[offset], &b_d[offset], N, &c_d[offset]); // copy data back to host cudaMemcpyAsync(c_h + offset, c_d + offset, databytes, cudaMemcpyDeviceToHost, streams[id]); } // check data bool success = 1; for(int i=0; i< N * num_streams; i++) { if (abs(c_h[i] - 3.3f) > 1e-6) { fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); success = 0; break; } } if(success) { printf("\nSuccess!\nExit.\n"); } //------------------------------------------------------------------------// // free //------------------------------------------------------------------------// for (int i = 0; i < num_streams; i++) { checkCudaErrors(cudaStreamDestroy(streams[i])); } free(a_h); free(b_h); free(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaDeviceReset(); return 0; }
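The host buffers above come from plain malloc, so the asynchronous copies are staged through pageable memory and cannot truly overlap with kernels running in other streams; copy/compute overlap requires page-locked host allocations. A minimal sketch of the pinned variant follows. It reuses N, num_streams, streams and checkCudaErrors from the listing and is illustrative rather than a drop-in patch; once the buffers are pinned, an explicit per-stream synchronization is needed before the host-side result check.

float *a_h = nullptr, *b_h = nullptr, *c_h = nullptr;
size_t bytes = (size_t)N * num_streams * sizeof(float);

// cudaMallocHost returns page-locked memory, which cudaMemcpyAsync can
// transfer concurrently with kernels running in other streams.
checkCudaErrors(cudaMallocHost((void**)&a_h, bytes));
checkCudaErrors(cudaMallocHost((void**)&b_h, bytes));
checkCudaErrors(cudaMallocHost((void**)&c_h, bytes));

// ... fill the inputs and launch the per-stream copies and kernels as above ...

// Wait for every stream before validating c_h on the host.
for (int i = 0; i < num_streams; i++)
    checkCudaErrors(cudaStreamSynchronize(streams[i]));

cudaFreeHost(a_h);
cudaFreeHost(b_h);
cudaFreeHost(c_h);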
c269d2d2e422fea2d19ba9c52be086d6c2317090.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" namespace cv { namespace gpu { namespace device { namespace gfft { texture<float, hipTextureType2D, hipReadModeElementType> eigTex(0, hipFilterModePoint, hipAddressModeClamp); __device__ uint g_counter = 0; template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, uint max_count, int rows, int cols) { #if __CUDA_ARCH__ >= 110 const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y * blockDim.y + threadIdx.y; if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j)) { float val = tex2D(eigTex, j, i); if (val > threshold) { float maxVal = val; maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal); if (val == maxVal) { const uint ind = atomicInc(&g_counter, (uint)(-1)); if (ind < max_count) corners[ind] = make_float2(j, i); } } } #endif // __CUDA_ARCH__ >= 110 } int findCorners_gpu(PtrStepSzf eig, float threshold, PtrStepSzb mask, float2* corners, int max_count) { void* counter_ptr; cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) ); cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(uint)) ); bindTexture(&eigTex, eig); dim3 block(16, 16); dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y)); if (mask.data) hipLaunchKernelGGL(( findCorners), dim3(grid), dim3(block), 0, 0, threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols); else hipLaunchKernelGGL(( findCorners), dim3(grid), dim3(block), 0, 0, threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); uint count; cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(uint), hipMemcpyDeviceToHost) ); return min(count, max_count); } class EigGreater { public: __device__ __forceinline__ bool operator()(float2 a, float2 b) const { return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y); } }; void sortCorners_gpu(PtrStepSzf eig, float2* corners, int count) { bindTexture(&eigTex, eig); thrust::device_ptr<float2> ptr(corners); thrust::sort(ptr, ptr + count, EigGreater()); } } // namespace optical_flow }}} #endif /* CUDA_DISABLER */
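findCorners appends surviving pixels to the output array through atomicInc on a global counter, a common stream-compaction idiom. The standalone CUDA sketch below shows the same idea stripped of the texture and mask machinery; the kernel name and parameters are illustrative only.

__device__ unsigned int d_count = 0;

__global__ void compactAboveThreshold(const float* in, int n, float threshold,
                                      float* out, unsigned int maxOut)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] > threshold) {
        // atomicInc wraps at the given limit; passing (unsigned)(-1) effectively
        // disables wrapping, matching the usage in findCorners above.
        unsigned int slot = atomicInc(&d_count, (unsigned int)(-1));
        if (slot < maxOut)
            out[slot] = in[i];
    }
}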
c269d2d2e422fea2d19ba9c52be086d6c2317090.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" namespace cv { namespace gpu { namespace device { namespace gfft { texture<float, cudaTextureType2D, cudaReadModeElementType> eigTex(0, cudaFilterModePoint, cudaAddressModeClamp); __device__ uint g_counter = 0; template <class Mask> __global__ void findCorners(float threshold, const Mask mask, float2* corners, uint max_count, int rows, int cols) { #if __CUDA_ARCH__ >= 110 const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y * blockDim.y + threadIdx.y; if (i > 0 && i < rows - 1 && j > 0 && j < cols - 1 && mask(i, j)) { float val = tex2D(eigTex, j, i); if (val > threshold) { float maxVal = val; maxVal = ::fmax(tex2D(eigTex, j - 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i - 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i), maxVal); maxVal = ::fmax(tex2D(eigTex, j - 1, i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j , i + 1), maxVal); maxVal = ::fmax(tex2D(eigTex, j + 1, i + 1), maxVal); if (val == maxVal) { const uint ind = atomicInc(&g_counter, (uint)(-1)); if (ind < max_count) corners[ind] = make_float2(j, i); } } } #endif // __CUDA_ARCH__ >= 110 } int findCorners_gpu(PtrStepSzf eig, float threshold, PtrStepSzb mask, float2* corners, int max_count) { void* counter_ptr; cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) ); cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(uint)) ); bindTexture(&eigTex, eig); dim3 block(16, 16); dim3 grid(divUp(eig.cols, block.x), divUp(eig.rows, block.y)); if (mask.data) findCorners<<<grid, block>>>(threshold, SingleMask(mask), corners, max_count, eig.rows, eig.cols); else findCorners<<<grid, block>>>(threshold, WithOutMask(), corners, max_count, eig.rows, eig.cols); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); uint count; cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(uint), cudaMemcpyDeviceToHost) ); return min(count, max_count); } class EigGreater { public: __device__ __forceinline__ bool operator()(float2 a, float2 b) const { return tex2D(eigTex, a.x, a.y) > tex2D(eigTex, b.x, b.y); } }; void sortCorners_gpu(PtrStepSzf eig, float2* corners, int count) { bindTexture(&eigTex, eig); thrust::device_ptr<float2> ptr(corners); thrust::sort(ptr, ptr + count, EigGreater()); } } // namespace optical_flow }}} #endif /* CUDA_DISABLER */
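sortCorners_gpu wraps the raw corner buffer in a thrust::device_ptr and sorts it with a functor that compares eigenvalues fetched through the texture. Below is a reduced sketch of the same Thrust pattern without the texture dependency, sorting an index list by a separate score array; the names are assumptions for illustration.

#include <thrust/device_ptr.h>
#include <thrust/sort.h>

struct DescendingByScore
{
    const float* score;   // device pointer holding one score per index
    __host__ __device__ explicit DescendingByScore(const float* s) : score(s) {}
    __device__ bool operator()(int a, int b) const { return score[a] > score[b]; }
};

void sortIndicesByScore(int* d_indices, const float* d_scores, int count)
{
    thrust::device_ptr<int> ptr(d_indices);
    // Sorts the index list in place so the highest-scoring entries come first.
    thrust::sort(ptr, ptr + count, DescendingByScore(d_scores));
}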
b4e3fcb7d05c7f18d649d43c5a2de8b21fdb99c9.hip
// !!! This is a file automatically generated by hipify!!! // wave 2D GPU // compile: nvcc -arch=sm_70 -O3 wave_2D_v3.cu // run: ./a.out #include "stdio.h" #include "stdlib.h" #include "math.h" #include "hip/hip_runtime.h" #define USE_SINGLE_PRECISION /* Comment this line using "//" if you want to use double precision. */ #ifdef USE_SINGLE_PRECISION #define DAT float #define PRECIS 4 #else #define DAT double #define PRECIS 8 #endif #define GPU_ID 3 #define OVERLENGTH_X 1 #define OVERLENGTH_Y 1 #define zeros(A,nx,ny) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc(((nx)*(ny))*sizeof(DAT)); \ for(i=0; i < ((nx)*(ny)); i++){ A##_h[i]=(DAT)0.0; } \ hipMalloc(&A##_d ,((nx)*(ny))*sizeof(DAT)); \ hipMemcpy( A##_d,A##_h,((nx)*(ny))*sizeof(DAT),hipMemcpyHostToDevice); #define free_all(A) free(A##_h); hipFree(A##_d); #define gather(A,nx,ny) hipMemcpy( A##_h,A##_d,((nx)*(ny))*sizeof(DAT),hipMemcpyDeviceToHost); #define for_ix int ix = blockIdx.x*blockDim.x + threadIdx.x; #define for_iy int iy = blockIdx.y*blockDim.y + threadIdx.y; #define Pres(ix,iy) ( P[ix + (iy)*nx ]) #define Velx(ix,iy) (Vx[ix + (iy)*(nx+1)]) #define Vely(ix,iy) (Vy[ix + (iy)*nx ]) // --------------------------------------------------------------------- // // Physics const DAT Lx = 10.0; const DAT Ly = 10.0; const DAT k = 1.0; const DAT rho = 1.0; // Numerics #define BLOCK_X 32 #define BLOCK_Y 32 #define GRID_X 320 #define GRID_Y 320 const int nx = BLOCK_X*GRID_X - OVERLENGTH_X; const int ny = BLOCK_Y*GRID_Y - OVERLENGTH_Y; const int nt = 200; const DAT dx = Lx/((DAT)nx); const DAT dy = Ly/((DAT)ny); const DAT dt = min(dx,dy)/sqrt(k/rho)/4.1; // --------------------------------------------------------------------- // void save_info(int me, const int nx, const int ny){ FILE* fid; fid=fopen("0_nxy.inf" ,"w"); fprintf(fid,"%d %d %d", PRECIS, nx, ny); fclose(fid); } #define save_info() save_info(me, nx, ny); void save_array(DAT* A, int nx, int ny, int me, const char A_name[]){ char* fname; FILE* fid; asprintf(&fname, "%d_%s.res" , me, A_name); fid=fopen(fname, "wb"); fwrite(A, sizeof(DAT), (nx)*(ny), fid); fclose(fid); free(fname); } #define SaveArray(A,nx,ny,A_name) gather(A,nx,ny); save_array(A##_h,nx,ny,me,A_name); void clean_cuda(){ hipError_t ce = hipGetLastError(); if(ce != hipSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", hipGetErrorString(ce)); hipDeviceReset();} } // Timer #include "sys/time.h" double timer_start = 0; double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; } void tic(){ timer_start = cpu_sec(); } double toc(){ return cpu_sec()-timer_start; } void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); } // --------------------------------------------------------------------- // // Computing physics kernels __global__ void init(DAT* x, DAT* y, DAT* P, const DAT Lx, const DAT Ly, const DAT dx, const DAT dy, const int nx, const int ny){ for_ix for_iy if (iy<ny && ix<nx){ x[ix + iy*nx] = (DAT)ix*dx + (-Lx+dx)/2.0; } if (iy<ny && ix<nx){ y[ix + iy*nx] = (DAT)iy*dy + (-Ly+dy)/2.0; } if (iy<ny && ix<nx){ P[ix + iy*nx] = exp(-(x[ix + iy*nx]*x[ix + iy*nx]) -(y[ix + iy*nx]*y[ix + iy*nx])); } } __global__ void compute_V(DAT* Vx, DAT* Vy, DAT* P, const DAT dt, const DAT rho, const DAT dx, const DAT dy, const int nx, const int ny){ for_ix for_iy if (iy<ny && ix>0 && ix<nx){ Velx(ix,iy) = Velx(ix,iy) - dt*(Pres(ix,iy)-Pres(ix-1,iy))/dx/rho; } if (iy>0 && iy<ny && ix<nx){ Vely(ix,iy) = Vely(ix,iy) - 
dt*(Pres(ix,iy)-Pres(ix,iy-1))/dy/rho; } } __global__ void compute_P(DAT* Vx, DAT* Vy, DAT* P, const DAT dt, const DAT k, const DAT dx, const DAT dy, const int nx, const int ny){ for_ix for_iy if (iy<ny && ix<nx){ Pres(ix,iy) = Pres(ix,iy) - dt*k*((Velx(ix+1,iy)-Velx(ix,iy))/dx + (Vely(ix,iy+1)-Vely(ix,iy))/dy ); } } int main(){ int i, it; size_t N=nx*ny, mem=N*sizeof(DAT); // Set up GPU int gpu_id=-1; dim3 grid, block; block.x = BLOCK_X; grid.x = GRID_X; block.y = BLOCK_Y; grid.y = GRID_Y; gpu_id = GPU_ID; hipSetDevice(gpu_id); hipGetDevice(&gpu_id); hipDeviceReset(); hipDeviceSetCacheConfig(hipFuncCachePreferL1); // set L1 to prefered printf("Process uses GPU with id %d.\n",gpu_id); printf("%dx%d, %1.3f GB, %d iterations.\n", nx,ny, 5*mem/1024./1024./1024., nt); printf("Launching (%dx%d) grid of (%dx%d) blocks.\n", grid.x, grid.y, block.x, block.y); // Initial arrays zeros(x ,nx ,ny ); zeros(y ,nx ,ny ); zeros(P ,nx ,ny ); zeros(Vx ,nx+1,ny ); zeros(Vy ,nx ,ny+1); // Initial conditions hipLaunchKernelGGL(( init), dim3(grid),dim3(block), 0, 0, x_d, y_d, P_d, Lx, Ly, dx, dy, nx, ny); hipDeviceSynchronize(); // Action for (it=0;it<nt;it++){ if (it==3){ tic(); } hipLaunchKernelGGL(( compute_V), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, P_d, dt, rho, dx, dy, nx, ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( compute_P), dim3(grid),dim3(block), 0, 0, Vx_d, Vy_d, P_d, dt, k, dx, dy, nx, ny); hipDeviceSynchronize(); }//it tim("Time (s), Effective MTP (GB/s)", mem*(nt-3)*4/1024./1024./1024.); // save_info(); // SaveArray(P ,nx ,ny ,"P" ); // SaveArray(Vx,nx+1,ny ,"Vx"); // SaveArray(Vy,nx ,ny+1,"Vy"); free_all(x ); free_all(y ); free_all(P ); free_all(Vx); free_all(Vy); clean_cuda(); }
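The zeros() macro in both listings hides a host allocation, a host-side zero fill, a device allocation and an upload behind one token-pasted name. For readability, zeros(P, nx, ny) in the CUDA version expands roughly to the lines below (the HIP version is identical apart from the hip* spellings).

// Approximate expansion of zeros(P, nx, ny):
DAT *P_d, *P_h;
P_h = (DAT*)malloc((nx * ny) * sizeof(DAT));             // host staging buffer
for (i = 0; i < nx * ny; i++) { P_h[i] = (DAT)0.0; }     // zero-fill on the host
cudaMalloc(&P_d, (nx * ny) * sizeof(DAT));               // device buffer
cudaMemcpy(P_d, P_h, (nx * ny) * sizeof(DAT), cudaMemcpyHostToDevice);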
b4e3fcb7d05c7f18d649d43c5a2de8b21fdb99c9.cu
// wave 2D GPU // compile: nvcc -arch=sm_70 -O3 wave_2D_v3.cu // run: ./a.out #include "stdio.h" #include "stdlib.h" #include "math.h" #include "cuda.h" #define USE_SINGLE_PRECISION /* Comment this line using "//" if you want to use double precision. */ #ifdef USE_SINGLE_PRECISION #define DAT float #define PRECIS 4 #else #define DAT double #define PRECIS 8 #endif #define GPU_ID 3 #define OVERLENGTH_X 1 #define OVERLENGTH_Y 1 #define zeros(A,nx,ny) DAT *A##_d,*A##_h; A##_h = (DAT*)malloc(((nx)*(ny))*sizeof(DAT)); \ for(i=0; i < ((nx)*(ny)); i++){ A##_h[i]=(DAT)0.0; } \ cudaMalloc(&A##_d ,((nx)*(ny))*sizeof(DAT)); \ cudaMemcpy( A##_d,A##_h,((nx)*(ny))*sizeof(DAT),cudaMemcpyHostToDevice); #define free_all(A) free(A##_h); cudaFree(A##_d); #define gather(A,nx,ny) cudaMemcpy( A##_h,A##_d,((nx)*(ny))*sizeof(DAT),cudaMemcpyDeviceToHost); #define for_ix int ix = blockIdx.x*blockDim.x + threadIdx.x; #define for_iy int iy = blockIdx.y*blockDim.y + threadIdx.y; #define Pres(ix,iy) ( P[ix + (iy)*nx ]) #define Velx(ix,iy) (Vx[ix + (iy)*(nx+1)]) #define Vely(ix,iy) (Vy[ix + (iy)*nx ]) // --------------------------------------------------------------------- // // Physics const DAT Lx = 10.0; const DAT Ly = 10.0; const DAT k = 1.0; const DAT rho = 1.0; // Numerics #define BLOCK_X 32 #define BLOCK_Y 32 #define GRID_X 320 #define GRID_Y 320 const int nx = BLOCK_X*GRID_X - OVERLENGTH_X; const int ny = BLOCK_Y*GRID_Y - OVERLENGTH_Y; const int nt = 200; const DAT dx = Lx/((DAT)nx); const DAT dy = Ly/((DAT)ny); const DAT dt = min(dx,dy)/sqrt(k/rho)/4.1; // --------------------------------------------------------------------- // void save_info(int me, const int nx, const int ny){ FILE* fid; fid=fopen("0_nxy.inf" ,"w"); fprintf(fid,"%d %d %d", PRECIS, nx, ny); fclose(fid); } #define save_info() save_info(me, nx, ny); void save_array(DAT* A, int nx, int ny, int me, const char A_name[]){ char* fname; FILE* fid; asprintf(&fname, "%d_%s.res" , me, A_name); fid=fopen(fname, "wb"); fwrite(A, sizeof(DAT), (nx)*(ny), fid); fclose(fid); free(fname); } #define SaveArray(A,nx,ny,A_name) gather(A,nx,ny); save_array(A##_h,nx,ny,me,A_name); void clean_cuda(){ cudaError_t ce = cudaGetLastError(); if(ce != cudaSuccess){ printf("ERROR launching GPU C-CUDA program: %s\n", cudaGetErrorString(ce)); cudaDeviceReset();} } // Timer #include "sys/time.h" double timer_start = 0; double cpu_sec(){ struct timeval tp; gettimeofday(&tp,NULL); return tp.tv_sec+1e-6*tp.tv_usec; } void tic(){ timer_start = cpu_sec(); } double toc(){ return cpu_sec()-timer_start; } void tim(const char *what, double n){ double s=toc(); printf("%s: %8.3f seconds",what,s);if(n>0)printf(", %8.3f GB/s", n/s); printf("\n"); } // --------------------------------------------------------------------- // // Computing physics kernels __global__ void init(DAT* x, DAT* y, DAT* P, const DAT Lx, const DAT Ly, const DAT dx, const DAT dy, const int nx, const int ny){ for_ix for_iy if (iy<ny && ix<nx){ x[ix + iy*nx] = (DAT)ix*dx + (-Lx+dx)/2.0; } if (iy<ny && ix<nx){ y[ix + iy*nx] = (DAT)iy*dy + (-Ly+dy)/2.0; } if (iy<ny && ix<nx){ P[ix + iy*nx] = exp(-(x[ix + iy*nx]*x[ix + iy*nx]) -(y[ix + iy*nx]*y[ix + iy*nx])); } } __global__ void compute_V(DAT* Vx, DAT* Vy, DAT* P, const DAT dt, const DAT rho, const DAT dx, const DAT dy, const int nx, const int ny){ for_ix for_iy if (iy<ny && ix>0 && ix<nx){ Velx(ix,iy) = Velx(ix,iy) - dt*(Pres(ix,iy)-Pres(ix-1,iy))/dx/rho; } if (iy>0 && iy<ny && ix<nx){ Vely(ix,iy) = Vely(ix,iy) - dt*(Pres(ix,iy)-Pres(ix,iy-1))/dy/rho; } } __global__ void 
compute_P(DAT* Vx, DAT* Vy, DAT* P, const DAT dt, const DAT k, const DAT dx, const DAT dy, const int nx, const int ny){ for_ix for_iy if (iy<ny && ix<nx){ Pres(ix,iy) = Pres(ix,iy) - dt*k*((Velx(ix+1,iy)-Velx(ix,iy))/dx + (Vely(ix,iy+1)-Vely(ix,iy))/dy ); } } int main(){ int i, it; size_t N=nx*ny, mem=N*sizeof(DAT); // Set up GPU int gpu_id=-1; dim3 grid, block; block.x = BLOCK_X; grid.x = GRID_X; block.y = BLOCK_Y; grid.y = GRID_Y; gpu_id = GPU_ID; cudaSetDevice(gpu_id); cudaGetDevice(&gpu_id); cudaDeviceReset(); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); // set L1 to prefered printf("Process uses GPU with id %d.\n",gpu_id); printf("%dx%d, %1.3f GB, %d iterations.\n", nx,ny, 5*mem/1024./1024./1024., nt); printf("Launching (%dx%d) grid of (%dx%d) blocks.\n", grid.x, grid.y, block.x, block.y); // Initial arrays zeros(x ,nx ,ny ); zeros(y ,nx ,ny ); zeros(P ,nx ,ny ); zeros(Vx ,nx+1,ny ); zeros(Vy ,nx ,ny+1); // Initial conditions init<<<grid,block>>>(x_d, y_d, P_d, Lx, Ly, dx, dy, nx, ny); cudaDeviceSynchronize(); // Action for (it=0;it<nt;it++){ if (it==3){ tic(); } compute_V<<<grid,block>>>(Vx_d, Vy_d, P_d, dt, rho, dx, dy, nx, ny); cudaDeviceSynchronize(); compute_P<<<grid,block>>>(Vx_d, Vy_d, P_d, dt, k, dx, dy, nx, ny); cudaDeviceSynchronize(); }//it tim("Time (s), Effective MTP (GB/s)", mem*(nt-3)*4/1024./1024./1024.); // save_info(); // SaveArray(P ,nx ,ny ,"P" ); // SaveArray(Vx,nx+1,ny ,"Vx"); // SaveArray(Vy,nx ,ny+1,"Vy"); free_all(x ); free_all(y ); free_all(P ); free_all(Vx); free_all(Vy); clean_cuda(); }
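The "Effective MTP (GB/s)" figure printed by tim() uses the listing's own assumption of four array-sized memory transactions per timed iteration. A worked instance of that bookkeeping, assuming single precision and the grid sizes hard-coded above:

// nx = ny = BLOCK_X*GRID_X - 1 = 32*320 - 1 = 10239
// mem = nx*ny*sizeof(float) = 104837121 * 4 B ~= 0.39 GiB per array
// timed iterations = nt - 3 = 197
// reported GB/s ~= 0.39 * 197 * 4 / elapsed_seconds ~= 308 / elapsed_seconds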
e7b1c91f27a49fe93e5ca19bc5ede849d68b5fae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #ifdef HAVEHALF #include "hip/hip_fp16.h" #endif extern "C" void THError(const char *fmt, ...); extern "C" int cuda_maphostmem; #define errcheck(f) do {int rc = f; if(rc) THError("Error %d in line %s:%d", rc, __FILE__, __LINE__); } while(0) #define BYTE2FLOAT 0.003921568f // 1/255 __global__ void grayscale2float_kernel(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { dst[threadIdx.x + blockIdx.x * width] = (src[threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[0]) / std[0]; } __global__ void rgb2float_kernel(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = (src[c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = (src[c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = (src[c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = (src[c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; } } __global__ void bgr2float_kernel(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = (src[2-c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = (src[2-c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = (src[2-c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = (src[2-c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; } } __global__ void rgb2half_kernel(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); } } __global__ void bgr2half_kernel(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 
3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); } } extern "C" float *cuda_grayscale2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std); extern "C" float *cuda_rgb2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr); extern "C" unsigned short *cuda_rgb2half(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr); float *cuda_grayscale2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { unsigned char *csrc; float *cmean, *cstd; if(cuda_maphostmem) { if(cuda_maphostmem == 2) errcheck(hipHostRegister((void *)src, height*srcstride, hipHostRegisterMapped)); errcheck(hipHostGetDevicePointer((void **)&csrc, (void *)src, 0)); } else { errcheck(hipMalloc((void **)&csrc, height * srcstride)); errcheck(hipMemcpy(csrc, src, height * srcstride, hipMemcpyHostToDevice)); } errcheck(hipMalloc((void **)&cmean, sizeof(*cmean))); errcheck(hipMemcpy(cmean, mean, sizeof(*cmean), hipMemcpyHostToDevice)); errcheck(hipMalloc((void **)&cstd, sizeof(*cstd))); errcheck(hipMemcpy(cstd, std, sizeof(*std), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( grayscale2float_kernel), dim3(height), dim3(width), 0, 0, dst, csrc, width, height, srcstride, cmean, cstd); errcheck(hipDeviceSynchronize()); if(cuda_maphostmem == 2) hipHostUnregister((void *)src); else if(cuda_maphostmem == 0) hipFree(csrc); hipFree(cmean); hipFree(cstd); return dst; } float *cuda_rgb2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr) { unsigned char *csrc; float *cmean, *cstd; if(cuda_maphostmem) { if(cuda_maphostmem == 2) errcheck(hipHostRegister((void *)src, height*srcstride, hipHostRegisterMapped)); errcheck(hipHostGetDevicePointer((void **)&csrc, (void *)src, 0)); } else { errcheck(hipMalloc((void **)&csrc, height * srcstride)); errcheck(hipMemcpy(csrc, src, height * srcstride, hipMemcpyHostToDevice)); } errcheck(hipMalloc((void **)&cmean, 3 * sizeof(*cmean))); errcheck(hipMemcpy(cmean, mean, 3 * sizeof(*cmean), hipMemcpyHostToDevice)); errcheck(hipMalloc((void **)&cstd, 3 * sizeof(*cstd))); errcheck(hipMemcpy(cstd, std, 3 * sizeof(*std), hipMemcpyHostToDevice)); if(bgr) hipLaunchKernelGGL(( bgr2float_kernel), dim3(height), dim3(width/4), 0, 0, dst, csrc, width, height, srcstride, cmean, cstd); elsehipLaunchKernelGGL(( rgb2float_kernel), dim3(height), dim3(width/4), 0, 0, dst, csrc, width, height, srcstride, cmean, cstd); errcheck(hipDeviceSynchronize()); if(cuda_maphostmem == 2) hipHostUnregister((void *)src); else if(cuda_maphostmem == 0) hipFree(csrc); hipFree(cmean); hipFree(cstd); return dst; } unsigned short *cuda_rgb2half(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr) { unsigned char *csrc; float *cmean, *cstd; 
if(cuda_maphostmem) { if(cuda_maphostmem == 2) errcheck(hipHostRegister((void *)src, height*srcstride, hipHostRegisterMapped)); errcheck(hipHostGetDevicePointer((void **)&csrc, (void *)src, 0)); } else { errcheck(hipMalloc((void **)&csrc, height * srcstride)); errcheck(hipMemcpy(csrc, src, height * srcstride, hipMemcpyHostToDevice)); } errcheck(hipMalloc((void **)&cmean, 3 * sizeof(*cmean))); errcheck(hipMemcpy(cmean, mean, 3 * sizeof(*cmean), hipMemcpyHostToDevice)); errcheck(hipMalloc((void **)&cstd, 3 * sizeof(*cstd))); errcheck(hipMemcpy(cstd, std, 3 * sizeof(*std), hipMemcpyHostToDevice)); if(bgr) hipLaunchKernelGGL(( bgr2half_kernel), dim3(height), dim3(width/4), 0, 0, dst, csrc, width, height, srcstride, cmean, cstd); elsehipLaunchKernelGGL(( rgb2half_kernel), dim3(height), dim3(width/4), 0, 0, dst, csrc, width, height, srcstride, cmean, cstd); errcheck(hipDeviceSynchronize()); if(cuda_maphostmem) hipHostUnregister((void *)src); else hipFree(csrc); hipFree(cmean); hipFree(cstd); return dst; } __global__ void fillwithone(float *dst, const int stride) { dst[threadIdx.x + blockIdx.x * stride] = 1; } #ifdef HAVEHALF __global__ void fillwithoneH(__half *dst, const int stride) { dst[threadIdx.x + blockIdx.x * stride] = __float2half(1); } #endif extern "C" void cuda_fillwithone(const int n1, const int n2, float *data, const int stride); extern "C" void cuda_fillwithoneH(const int n1, const int n2, float *data, const int stride); void cuda_fillwithone(const int n1, const int n2, float *data, const int stride) { hipLaunchKernelGGL(( fillwithone), dim3(n1), dim3(n2), 0, 0, data, stride); } #ifdef HAVEHALF void cuda_fillwithoneH(const int n1, const int n2, float *data, const int stride) { hipLaunchKernelGGL(( fillwithoneH), dim3(n1), dim3(n2), 0, 0, (__half *)data, stride); } #endif
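The RGB/BGR conversion kernels above are launched with one block per image row and width/4 threads per block, each thread handling four consecutive pixels. That implicitly assumes width is a multiple of 4 and that width/4 does not exceed the 1024-thread block limit (i.e. width <= 4096). A hypothetical host-side guard expressing those constraints:

// Hypothetical guard mirroring the <<<height, width/4>>> launch geometry used above.
static int rgbLaunchGeometryOk(int width)
{
    return (width % 4 == 0) && (width / 4 <= 1024);   // 1024 = max threads per block
}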
e7b1c91f27a49fe93e5ca19bc5ede849d68b5fae.cu
#include <stdio.h> #ifdef HAVEHALF #include "cuda_fp16.h" #endif extern "C" void THError(const char *fmt, ...); extern "C" int cuda_maphostmem; #define errcheck(f) do {int rc = f; if(rc) THError("Error %d in line %s:%d", rc, __FILE__, __LINE__); } while(0) #define BYTE2FLOAT 0.003921568f // 1/255 __global__ void grayscale2float_kernel(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { dst[threadIdx.x + blockIdx.x * width] = (src[threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[0]) / std[0]; } __global__ void rgb2float_kernel(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = (src[c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = (src[c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = (src[c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = (src[c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; } } __global__ void bgr2float_kernel(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = (src[2-c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = (src[2-c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = (src[2-c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = (src[2-c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]; } } __global__ void rgb2half_kernel(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = __float2half_rn((src[c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); } } __global__ void bgr2half_kernel(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { int c; for(c = 0; c < 3; c++) { dst[4*threadIdx.x + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*4*threadIdx.x + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+1 + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*(4*threadIdx.x+1) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); 
dst[4*threadIdx.x+2 + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*(4*threadIdx.x+2) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); dst[4*threadIdx.x+3 + (blockIdx.x + c * height) * width] = __float2half_rn((src[2-c + 3*(4*threadIdx.x+3) + srcstride*blockIdx.x] * BYTE2FLOAT - mean[c]) / std[c]); } } extern "C" float *cuda_grayscale2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std); extern "C" float *cuda_rgb2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr); extern "C" unsigned short *cuda_rgb2half(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr); float *cuda_grayscale2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std) { unsigned char *csrc; float *cmean, *cstd; if(cuda_maphostmem) { if(cuda_maphostmem == 2) errcheck(cudaHostRegister((void *)src, height*srcstride, cudaHostRegisterMapped)); errcheck(cudaHostGetDevicePointer((void **)&csrc, (void *)src, 0)); } else { errcheck(cudaMalloc((void **)&csrc, height * srcstride)); errcheck(cudaMemcpy(csrc, src, height * srcstride, cudaMemcpyHostToDevice)); } errcheck(cudaMalloc((void **)&cmean, sizeof(*cmean))); errcheck(cudaMemcpy(cmean, mean, sizeof(*cmean), cudaMemcpyHostToDevice)); errcheck(cudaMalloc((void **)&cstd, sizeof(*cstd))); errcheck(cudaMemcpy(cstd, std, sizeof(*std), cudaMemcpyHostToDevice)); grayscale2float_kernel<<<height, width>>>(dst, csrc, width, height, srcstride, cmean, cstd); errcheck(cudaDeviceSynchronize()); if(cuda_maphostmem == 2) cudaHostUnregister((void *)src); else if(cuda_maphostmem == 0) cudaFree(csrc); cudaFree(cmean); cudaFree(cstd); return dst; } float *cuda_rgb2float(float *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr) { unsigned char *csrc; float *cmean, *cstd; if(cuda_maphostmem) { if(cuda_maphostmem == 2) errcheck(cudaHostRegister((void *)src, height*srcstride, cudaHostRegisterMapped)); errcheck(cudaHostGetDevicePointer((void **)&csrc, (void *)src, 0)); } else { errcheck(cudaMalloc((void **)&csrc, height * srcstride)); errcheck(cudaMemcpy(csrc, src, height * srcstride, cudaMemcpyHostToDevice)); } errcheck(cudaMalloc((void **)&cmean, 3 * sizeof(*cmean))); errcheck(cudaMemcpy(cmean, mean, 3 * sizeof(*cmean), cudaMemcpyHostToDevice)); errcheck(cudaMalloc((void **)&cstd, 3 * sizeof(*cstd))); errcheck(cudaMemcpy(cstd, std, 3 * sizeof(*std), cudaMemcpyHostToDevice)); if(bgr) bgr2float_kernel<<<height, width/4>>>(dst, csrc, width, height, srcstride, cmean, cstd); else rgb2float_kernel<<<height, width/4>>>(dst, csrc, width, height, srcstride, cmean, cstd); errcheck(cudaDeviceSynchronize()); if(cuda_maphostmem == 2) cudaHostUnregister((void *)src); else if(cuda_maphostmem == 0) cudaFree(csrc); cudaFree(cmean); cudaFree(cstd); return dst; } unsigned short *cuda_rgb2half(unsigned short *dst, const unsigned char *src, const int width, const int height, const int srcstride, const float *mean, const float *std, const int bgr) { unsigned char *csrc; float *cmean, *cstd; if(cuda_maphostmem) { if(cuda_maphostmem == 2) errcheck(cudaHostRegister((void *)src, height*srcstride, cudaHostRegisterMapped)); 
errcheck(cudaHostGetDevicePointer((void **)&csrc, (void *)src, 0)); } else { errcheck(cudaMalloc((void **)&csrc, height * srcstride)); errcheck(cudaMemcpy(csrc, src, height * srcstride, cudaMemcpyHostToDevice)); } errcheck(cudaMalloc((void **)&cmean, 3 * sizeof(*cmean))); errcheck(cudaMemcpy(cmean, mean, 3 * sizeof(*cmean), cudaMemcpyHostToDevice)); errcheck(cudaMalloc((void **)&cstd, 3 * sizeof(*cstd))); errcheck(cudaMemcpy(cstd, std, 3 * sizeof(*std), cudaMemcpyHostToDevice)); if(bgr) bgr2half_kernel<<<height, width/4>>>(dst, csrc, width, height, srcstride, cmean, cstd); else rgb2half_kernel<<<height, width/4>>>(dst, csrc, width, height, srcstride, cmean, cstd); errcheck(cudaDeviceSynchronize()); if(cuda_maphostmem) cudaHostUnregister((void *)src); else cudaFree(csrc); cudaFree(cmean); cudaFree(cstd); return dst; } __global__ void fillwithone(float *dst, const int stride) { dst[threadIdx.x + blockIdx.x * stride] = 1; } #ifdef HAVEHALF __global__ void fillwithoneH(__half *dst, const int stride) { dst[threadIdx.x + blockIdx.x * stride] = __float2half(1); } #endif extern "C" void cuda_fillwithone(const int n1, const int n2, float *data, const int stride); extern "C" void cuda_fillwithoneH(const int n1, const int n2, float *data, const int stride); void cuda_fillwithone(const int n1, const int n2, float *data, const int stride) { fillwithone<<<n1, n2>>>(data, stride); } #ifdef HAVEHALF void cuda_fillwithoneH(const int n1, const int n2, float *data, const int stride) { fillwithoneH<<<n1, n2>>>((__half *)data, stride); } #endif
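A hypothetical call site for cuda_rgb2float, assuming an interleaved 8-bit RGB image already resident in host memory; the dimensions and normalisation triples are placeholders, and errcheck comes from the listing above.

// src: host pointer to height*srcstride bytes of interleaved RGB data (assumed to exist).
const int width = 224, height = 224;                 // must satisfy the width % 4 == 0 launch assumption
const int srcstride = width * 3;                     // tightly packed rows
const float mean_rgb[3] = {0.485f, 0.456f, 0.406f};  // placeholder normalisation values
const float std_rgb[3]  = {0.229f, 0.224f, 0.225f};

float *d_planar = NULL;
errcheck(cudaMalloc((void**)&d_planar, 3 * width * height * sizeof(float)));
cuda_rgb2float(d_planar, src, width, height, srcstride, mean_rgb, std_rgb, /*bgr=*/0);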
4320533115a978bb4d949ba6c5e13a0594bbb2bf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" {

#ifndef REAL
#define REAL float
#endif

#ifndef CAST
#define CAST(fun) fun ## f
#endif

#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif

#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif

}

__global__ void ge_floor (const int sd, const int fd,
                          const REAL* a, const int offset_a, const int ld_a,
                          REAL* b, const int offset_b, const int ld_b) {

    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < fd);

    if (valid) {
        b[offset_b + gid_0 + gid_1 * ld_b] = CAST(floor)(a[offset_a + gid_0 + gid_1 * ld_a]);
    }
}
4320533115a978bb4d949ba6c5e13a0594bbb2bf.cu
#include "includes.h"

extern "C" {

#ifndef REAL
#define REAL float
#endif

#ifndef CAST
#define CAST(fun) fun ## f
#endif

#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif

#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif

}

__global__ void ge_floor (const int sd, const int fd,
                          const REAL* a, const int offset_a, const int ld_a,
                          REAL* b, const int offset_b, const int ld_b) {

    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < fd);

    if (valid) {
        b[offset_b + gid_0 + gid_1 * ld_b] = CAST(floor)(a[offset_a + gid_0 + gid_1 * ld_a]);
    }
}
db84a17b57d88ffbd1497f4f171fba64e1024557.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <UnitTest++.h>
#include <memory>

#include "MonteRay_GridSystemInterface.hh"
#include "UnitTestHelper.hh"

#include <array>

using namespace MonteRay;

namespace GridSystemInterfaceTester {

SUITE(GridSystemInterface_test) {

  class GridSystemInterfaceTester{
    public:
      std::unique_ptr<MonteRay_GridSystemInterface> pGrid;
      GridSystemInterfaceTester(){
        std::array<MonteRay_GridBins, 3> gridBins{
          MonteRay_GridBins(0, 10, 10, MonteRay_GridBins::RADIAL),
          MonteRay_GridBins(-10, 10, 20),
          MonteRay_GridBins(-10, 10, 30) };
        pGrid = std::make_unique<MonteRay_GridSystemInterface>(gridBins, 3);
      }
  };

  TEST_FIXTURE(GridSystemInterfaceTester, ConstructorsAndGetters){
    CHECK_EQUAL(3, pGrid->getDimension());
    CHECK_EQUAL(10, pGrid->getNumGridBins(0));
    CHECK_EQUAL(20, pGrid->getNumGridBins(1));
    CHECK_EQUAL(30, pGrid->getNumGridBins(2));
    CHECK_EQUAL(1, pGrid->getMinVertex(0));
    CHECK_EQUAL(-10, pGrid->getMinVertex(1));
    CHECK_EQUAL(10, pGrid->getMaxVertex(0));
    CHECK_EQUAL(10, pGrid->getMaxVertex(1));
    CHECK_EQUAL(-4.0, pGrid->getVertex(1, 6));
    CHECK_EQUAL(10, pGrid->getNumVertices(0));
    CHECK_EQUAL(10, pGrid->getNumVerticesSq(0));
    CHECK_EQUAL(0, pGrid->getNumVerticesSq(1));
    CHECK_EQUAL(10*20*30, pGrid->getNumCells());

    std::array<int, 3> indices{9, 19, 29};
    CHECK(not pGrid->isOutside(indices.data()));
    indices = {10, 19, 29};
    CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 20, 29};
    CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 19, 30};
    CHECK(pGrid->isOutside(indices.data()));

    indices = {0, 0, 0};
    CHECK_EQUAL(0, pGrid->calcIndex(indices));
    indices = {0, 2, 0};
    CHECK_EQUAL(10*2, pGrid->calcIndex(indices));
    indices = {0, 0, 2};
    CHECK_EQUAL(10*20*2, pGrid->calcIndex(indices));
    indices = {9, 19, 29};
    CHECK_EQUAL(10*20*30 - 1, pGrid->calcIndex(indices));
  }

#ifdef __HIPCC__
  __global__ void testSpatialGrid(bool* testVal, MonteRay_GridSystemInterface* pGrid) {
    *testVal = true;
    GPU_CHECK_EQUAL(3, pGrid->getDimension());
    GPU_CHECK_EQUAL(10, pGrid->getNumGridBins(0));
    GPU_CHECK_EQUAL(20, pGrid->getNumGridBins(1));
    GPU_CHECK_EQUAL(30, pGrid->getNumGridBins(2));
    GPU_CHECK_EQUAL(1, pGrid->getMinVertex(0));
    GPU_CHECK_EQUAL(-10, pGrid->getMinVertex(1));
    GPU_CHECK_EQUAL(10, pGrid->getMaxVertex(0));
    GPU_CHECK_EQUAL(10, pGrid->getMaxVertex(1));
    GPU_CHECK_EQUAL(-4.0, pGrid->getVertex(1, 6));
    GPU_CHECK_EQUAL(10, pGrid->getNumVertices(0));
    GPU_CHECK_EQUAL(10, pGrid->getNumVerticesSq(0));
    GPU_CHECK_EQUAL(0, pGrid->getNumVerticesSq(1));
    GPU_CHECK_EQUAL(10*20*30, pGrid->getNumCells());

    Array<int, 3> indices{9, 19, 29};
    GPU_CHECK(not pGrid->isOutside(indices.data()));
    indices = {10, 19, 29};
    GPU_CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 20, 29};
    GPU_CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 19, 30};
    GPU_CHECK(pGrid->isOutside(indices.data()));

    indices = {0, 0, 0};
    GPU_CHECK_EQUAL(0, pGrid->calcIndex(indices));
    indices = {0, 2, 0};
    GPU_CHECK_EQUAL(10*2, pGrid->calcIndex(indices));
    indices = {0, 0, 2};
    GPU_CHECK_EQUAL(10*20*2, pGrid->calcIndex(indices));
    indices = {9, 19, 29};
    GPU_CHECK_EQUAL(10*20*30 - 1, pGrid->calcIndex(indices));
  }

  TEST_FIXTURE(GridSystemInterfaceTester, ConstructorsAndGettersOnGPU){
    bool* pTestVal;
    hipMallocManaged(&pTestVal, sizeof(bool));
    hipLaunchKernelGGL(( testSpatialGrid), dim3(1), dim3(1), 0, 0, pTestVal, pGrid.get());
    hipDeviceSynchronize();
    CHECK(*pTestVal);
    hipFree(pTestVal);
  }
#endif

}

} // end namespace
db84a17b57d88ffbd1497f4f171fba64e1024557.cu
#include <UnitTest++.h>
#include <memory>

#include "MonteRay_GridSystemInterface.hh"
#include "UnitTestHelper.hh"

#include <array>

using namespace MonteRay;

namespace GridSystemInterfaceTester {

SUITE(GridSystemInterface_test) {

  class GridSystemInterfaceTester{
    public:
      std::unique_ptr<MonteRay_GridSystemInterface> pGrid;
      GridSystemInterfaceTester(){
        std::array<MonteRay_GridBins, 3> gridBins{
          MonteRay_GridBins(0, 10, 10, MonteRay_GridBins::RADIAL),
          MonteRay_GridBins(-10, 10, 20),
          MonteRay_GridBins(-10, 10, 30) };
        pGrid = std::make_unique<MonteRay_GridSystemInterface>(gridBins, 3);
      }
  };

  TEST_FIXTURE(GridSystemInterfaceTester, ConstructorsAndGetters){
    CHECK_EQUAL(3, pGrid->getDimension());
    CHECK_EQUAL(10, pGrid->getNumGridBins(0));
    CHECK_EQUAL(20, pGrid->getNumGridBins(1));
    CHECK_EQUAL(30, pGrid->getNumGridBins(2));
    CHECK_EQUAL(1, pGrid->getMinVertex(0));
    CHECK_EQUAL(-10, pGrid->getMinVertex(1));
    CHECK_EQUAL(10, pGrid->getMaxVertex(0));
    CHECK_EQUAL(10, pGrid->getMaxVertex(1));
    CHECK_EQUAL(-4.0, pGrid->getVertex(1, 6));
    CHECK_EQUAL(10, pGrid->getNumVertices(0));
    CHECK_EQUAL(10, pGrid->getNumVerticesSq(0));
    CHECK_EQUAL(0, pGrid->getNumVerticesSq(1));
    CHECK_EQUAL(10*20*30, pGrid->getNumCells());

    std::array<int, 3> indices{9, 19, 29};
    CHECK(not pGrid->isOutside(indices.data()));
    indices = {10, 19, 29};
    CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 20, 29};
    CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 19, 30};
    CHECK(pGrid->isOutside(indices.data()));

    indices = {0, 0, 0};
    CHECK_EQUAL(0, pGrid->calcIndex(indices));
    indices = {0, 2, 0};
    CHECK_EQUAL(10*2, pGrid->calcIndex(indices));
    indices = {0, 0, 2};
    CHECK_EQUAL(10*20*2, pGrid->calcIndex(indices));
    indices = {9, 19, 29};
    CHECK_EQUAL(10*20*30 - 1, pGrid->calcIndex(indices));
  }

#ifdef __CUDACC__
  __global__ void testSpatialGrid(bool* testVal, MonteRay_GridSystemInterface* pGrid) {
    *testVal = true;
    GPU_CHECK_EQUAL(3, pGrid->getDimension());
    GPU_CHECK_EQUAL(10, pGrid->getNumGridBins(0));
    GPU_CHECK_EQUAL(20, pGrid->getNumGridBins(1));
    GPU_CHECK_EQUAL(30, pGrid->getNumGridBins(2));
    GPU_CHECK_EQUAL(1, pGrid->getMinVertex(0));
    GPU_CHECK_EQUAL(-10, pGrid->getMinVertex(1));
    GPU_CHECK_EQUAL(10, pGrid->getMaxVertex(0));
    GPU_CHECK_EQUAL(10, pGrid->getMaxVertex(1));
    GPU_CHECK_EQUAL(-4.0, pGrid->getVertex(1, 6));
    GPU_CHECK_EQUAL(10, pGrid->getNumVertices(0));
    GPU_CHECK_EQUAL(10, pGrid->getNumVerticesSq(0));
    GPU_CHECK_EQUAL(0, pGrid->getNumVerticesSq(1));
    GPU_CHECK_EQUAL(10*20*30, pGrid->getNumCells());

    Array<int, 3> indices{9, 19, 29};
    GPU_CHECK(not pGrid->isOutside(indices.data()));
    indices = {10, 19, 29};
    GPU_CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 20, 29};
    GPU_CHECK(pGrid->isOutside(indices.data()));
    indices = {9, 19, 30};
    GPU_CHECK(pGrid->isOutside(indices.data()));

    indices = {0, 0, 0};
    GPU_CHECK_EQUAL(0, pGrid->calcIndex(indices));
    indices = {0, 2, 0};
    GPU_CHECK_EQUAL(10*2, pGrid->calcIndex(indices));
    indices = {0, 0, 2};
    GPU_CHECK_EQUAL(10*20*2, pGrid->calcIndex(indices));
    indices = {9, 19, 29};
    GPU_CHECK_EQUAL(10*20*30 - 1, pGrid->calcIndex(indices));
  }

  TEST_FIXTURE(GridSystemInterfaceTester, ConstructorsAndGettersOnGPU){
    bool* pTestVal;
    cudaMallocManaged(&pTestVal, sizeof(bool));
    testSpatialGrid<<<1, 1>>>(pTestVal, pGrid.get());
    cudaDeviceSynchronize();
    CHECK(*pTestVal);
    cudaFree(pTestVal);
  }
#endif

}

} // end namespace
6dca659ec3ce5ce9d92ece8a783074a18f3b5f04.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <cmath> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime.h> #include <iomanip> #include <iostream> // helper for time measurement typedef std::chrono::duration<double, std::milli> d_ms; const auto &now = std::chrono::high_resolution_clock::now; // Define Error Checking Macro #define CU_CHK(ERRORCODE) \ { \ hipError_t error = ERRORCODE; \ if (error != 0) { \ std::cerr << hipGetErrorName(error) << ": " \ << hipGetErrorString(error) << " at " << __FILE__ << ":" \ << __LINE__ << "\n"; \ } \ } // Constants const static int DEFAULT_NUM_DATA = 1024; const static int DEFAULT_NUM_CLUSTER = 3; const static int DEFAULT_DIMENSIONS = 2; const static int DEFAULT_NUM_ITERATIONS = 5; const static int DEFAULT_BLOCK_DIM = 1024; extern void kmeans_cluster_assignment_wrapper(int grid_size, int block_size, float *data, int *data_ca, float *centroids, int numData, int numCluster, int numDimensions); extern void kmeans_centroid_sum_wrapper(int grid_size, int block_size, float *data, int *data_ca, float *centroids, int *cluster_count, int numData, int numCluster, int numDimensions); extern void kmeans_centriod_update_wrapper(int grid_size, int block_size, float *centroids, int *cluster_count, int numCluster, int numDimensions); // Main int main(int argc, char *argv[]) { // Process Arguments if (argc < 2 or std::string(argv[1]) == "-h") { std::cout << "Usage:\n\t" << argv[0] << " <data points> [iterations] [dimensions]\n"; return 1; } int numData = 0; numData = std::stoi(argv[1]); numData = numData > 0 ? numData : DEFAULT_NUM_DATA; int numIterations = 0; if (argc > 2) numIterations = std::stoi(argv[2]); numIterations = numIterations != 0 ? numIterations : DEFAULT_NUM_ITERATIONS; int numDimensions = 0; if (argc > 3) numDimensions = std::stoi(argv[2]); numDimensions = numDimensions != 0 ? 
numDimensions : DEFAULT_DIMENSIONS; int numCluster = DEFAULT_NUM_CLUSTER; // Allocate Memory // Host float *h_data; CU_CHK(hipHostMalloc(&h_data, (size_t)(numData * numDimensions * sizeof(float)))); float *h_centroids; CU_CHK(hipHostMalloc(&h_centroids, (size_t)(numCluster * numDimensions * sizeof(float)))); // Init srand(0); // Always the same random numbers for (int i = 0; i < numData; ++i) for (int d = 0; d < numDimensions; ++d) h_centroids[i * numDimensions + d] = (float)rand() / (double)RAND_MAX; for (int c = 0; c < numCluster; ++c) for (int d = 0; d < numDimensions; ++d) h_centroids[c * numDimensions + d] = (float)rand() / (double)RAND_MAX; // Device Memory float *d_data; CU_CHK(hipMalloc(&d_data, (size_t)(numData * numDimensions * sizeof(float)))); int *d_data_ca; CU_CHK(hipMalloc(&d_data_ca, (size_t)(numData * sizeof(int)))); float *d_centroids; CU_CHK(hipMalloc(&d_centroids, (size_t)(numCluster * numDimensions * sizeof(float)))); int *d_cluster_count; CU_CHK(hipMalloc(&d_cluster_count, (size_t)(numCluster * sizeof(int)))); // Copy Data to the Device auto t1 = now(); CU_CHK(hipMemcpy(d_data, h_data, (size_t)(numData * numDimensions * sizeof(float)), hipMemcpyHostToDevice)); CU_CHK(hipMemcpy(d_centroids, d_centroids, (size_t)(numCluster * numDimensions * sizeof(float)), hipMemcpyHostToDevice)); CU_CHK(hipMemset(d_cluster_count, 0, numCluster * sizeof(int))); auto t2 = now(); // Block Dimension / Threads per Block int block_dim = DEFAULT_BLOCK_DIM; int grid_dim = ceil(static_cast<float>(numData) / static_cast<float>(block_dim)); std::cout << "Computing kmeans with " << numData << " elements and " << numIterations << " iterations\n"; std::cout << "Launch kernel with " << grid_dim << " blocks and " << block_dim << " threads per block\n"; auto t3 = now(); for (int i = 0; i < numIterations; i++) { kmeans_cluster_assignment_wrapper(grid_dim, block_dim, d_data, d_data_ca, d_centroids, numData, numCluster, numDimensions); CU_CHK( hipMemset(d_centroids, 0, numCluster * numDimensions * sizeof(float))); CU_CHK(hipMemset(d_cluster_count, 0, numCluster * sizeof(int))); kmeans_centroid_sum_wrapper(grid_dim, block_dim, d_data, d_data_ca, d_centroids, d_cluster_count, numData, numCluster, numDimensions); kmeans_centriod_update_wrapper(grid_dim, numCluster * numDimensions, d_centroids, d_cluster_count, numCluster, numDimensions); } // Synchronize CU_CHK(hipDeviceSynchronize()); auto t4 = now(); // Compute time for copies and kernel d_ms time_copyH2D = t2 - t1; d_ms time_kernel = t4 - t3; // Free Memory CU_CHK(hipHostFree(h_data)); CU_CHK(hipHostFree(h_centroids)); CU_CHK(hipFree(d_data)); CU_CHK(hipFree(d_centroids)); CU_CHK(hipFree(d_data_ca)); CU_CHK(hipFree(d_cluster_count)); // Print Meassurement Results std::cout << "Results:\n" << "H2D [ms], kernel [ms]\n" << time_copyH2D.count() << ", " << time_kernel.count() << "\n"; return 0; }
6dca659ec3ce5ce9d92ece8a783074a18f3b5f04.cu
#include <chrono> #include <cmath> #include <cstdio> #include <cstdlib> #include <cuda_runtime.h> #include <iomanip> #include <iostream> // helper for time measurement typedef std::chrono::duration<double, std::milli> d_ms; const auto &now = std::chrono::high_resolution_clock::now; // Define Error Checking Macro #define CU_CHK(ERRORCODE) \ { \ cudaError_t error = ERRORCODE; \ if (error != 0) { \ std::cerr << cudaGetErrorName(error) << ": " \ << cudaGetErrorString(error) << " at " << __FILE__ << ":" \ << __LINE__ << "\n"; \ } \ } // Constants const static int DEFAULT_NUM_DATA = 1024; const static int DEFAULT_NUM_CLUSTER = 3; const static int DEFAULT_DIMENSIONS = 2; const static int DEFAULT_NUM_ITERATIONS = 5; const static int DEFAULT_BLOCK_DIM = 1024; extern void kmeans_cluster_assignment_wrapper(int grid_size, int block_size, float *data, int *data_ca, float *centroids, int numData, int numCluster, int numDimensions); extern void kmeans_centroid_sum_wrapper(int grid_size, int block_size, float *data, int *data_ca, float *centroids, int *cluster_count, int numData, int numCluster, int numDimensions); extern void kmeans_centriod_update_wrapper(int grid_size, int block_size, float *centroids, int *cluster_count, int numCluster, int numDimensions); // Main int main(int argc, char *argv[]) { // Process Arguments if (argc < 2 or std::string(argv[1]) == "-h") { std::cout << "Usage:\n\t" << argv[0] << " <data points> [iterations] [dimensions]\n"; return 1; } int numData = 0; numData = std::stoi(argv[1]); numData = numData > 0 ? numData : DEFAULT_NUM_DATA; int numIterations = 0; if (argc > 2) numIterations = std::stoi(argv[2]); numIterations = numIterations != 0 ? numIterations : DEFAULT_NUM_ITERATIONS; int numDimensions = 0; if (argc > 3) numDimensions = std::stoi(argv[2]); numDimensions = numDimensions != 0 ? 
numDimensions : DEFAULT_DIMENSIONS; int numCluster = DEFAULT_NUM_CLUSTER; // Allocate Memory // Host float *h_data; CU_CHK(cudaMallocHost(&h_data, (size_t)(numData * numDimensions * sizeof(float)))); float *h_centroids; CU_CHK(cudaMallocHost(&h_centroids, (size_t)(numCluster * numDimensions * sizeof(float)))); // Init srand(0); // Always the same random numbers for (int i = 0; i < numData; ++i) for (int d = 0; d < numDimensions; ++d) h_centroids[i * numDimensions + d] = (float)rand() / (double)RAND_MAX; for (int c = 0; c < numCluster; ++c) for (int d = 0; d < numDimensions; ++d) h_centroids[c * numDimensions + d] = (float)rand() / (double)RAND_MAX; // Device Memory float *d_data; CU_CHK(cudaMalloc(&d_data, (size_t)(numData * numDimensions * sizeof(float)))); int *d_data_ca; CU_CHK(cudaMalloc(&d_data_ca, (size_t)(numData * sizeof(int)))); float *d_centroids; CU_CHK(cudaMalloc(&d_centroids, (size_t)(numCluster * numDimensions * sizeof(float)))); int *d_cluster_count; CU_CHK(cudaMalloc(&d_cluster_count, (size_t)(numCluster * sizeof(int)))); // Copy Data to the Device auto t1 = now(); CU_CHK(cudaMemcpy(d_data, h_data, (size_t)(numData * numDimensions * sizeof(float)), cudaMemcpyHostToDevice)); CU_CHK(cudaMemcpy(d_centroids, d_centroids, (size_t)(numCluster * numDimensions * sizeof(float)), cudaMemcpyHostToDevice)); CU_CHK(cudaMemset(d_cluster_count, 0, numCluster * sizeof(int))); auto t2 = now(); // Block Dimension / Threads per Block int block_dim = DEFAULT_BLOCK_DIM; int grid_dim = ceil(static_cast<float>(numData) / static_cast<float>(block_dim)); std::cout << "Computing kmeans with " << numData << " elements and " << numIterations << " iterations\n"; std::cout << "Launch kernel with " << grid_dim << " blocks and " << block_dim << " threads per block\n"; auto t3 = now(); for (int i = 0; i < numIterations; i++) { kmeans_cluster_assignment_wrapper(grid_dim, block_dim, d_data, d_data_ca, d_centroids, numData, numCluster, numDimensions); CU_CHK( cudaMemset(d_centroids, 0, numCluster * numDimensions * sizeof(float))); CU_CHK(cudaMemset(d_cluster_count, 0, numCluster * sizeof(int))); kmeans_centroid_sum_wrapper(grid_dim, block_dim, d_data, d_data_ca, d_centroids, d_cluster_count, numData, numCluster, numDimensions); kmeans_centriod_update_wrapper(grid_dim, numCluster * numDimensions, d_centroids, d_cluster_count, numCluster, numDimensions); } // Synchronize CU_CHK(cudaDeviceSynchronize()); auto t4 = now(); // Compute time for copies and kernel d_ms time_copyH2D = t2 - t1; d_ms time_kernel = t4 - t3; // Free Memory CU_CHK(cudaFreeHost(h_data)); CU_CHK(cudaFreeHost(h_centroids)); CU_CHK(cudaFree(d_data)); CU_CHK(cudaFree(d_centroids)); CU_CHK(cudaFree(d_data_ca)); CU_CHK(cudaFree(d_cluster_count)); // Print Meassurement Results std::cout << "Results:\n" << "H2D [ms], kernel [ms]\n" << time_copyH2D.count() << ", " << time_kernel.count() << "\n"; return 0; }
43b2d2cdc39acd3dde719776a72170506dbc1a45.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C"
__global__ void tanh_double(int n,int idx,double *dy,int incy,double *result) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if(i >= idx && i % incy == 0)
            result[i] = tanh(dy[i]);
    }
}
43b2d2cdc39acd3dde719776a72170506dbc1a45.cu
#include "includes.h"

extern "C"
__global__ void tanh_double(int n,int idx,double *dy,int incy,double *result) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if(i >= idx && i % incy == 0)
            result[i] = tanh(dy[i]);
    }
}
3314223527d484d8601b8d4e4bc17d396c97484f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "class_based_boxes_single_op.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ int gpu_atomic_add(const int val, int* address) { return atomicAdd(address, val); } __device__ void _copy(const int size, const float* source, float* target) { for (int i=0; i<size; i++) { *(target++) = *(source++); } } __global__ void ClassBasedBoxesSingleForward(const int nthreads, const float* input_boxes, const float* input_feats, const int num_feat, const float cls_float, float* cls_box, float* cls_feat, int* counter) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int bp = i * 7; const float cls_f = input_boxes[bp+5]; const float valid = input_boxes[bp+6]; if (cls_f == cls_float && valid > 0.) { const int fp = i * num_feat; // returns the old index const int idx = gpu_atomic_add(1, counter); const int bbp = idx * 8; const int ffp = idx * num_feat; const float x1 = input_boxes[bp]; const float y1 = input_boxes[bp+1]; const float x2 = input_boxes[bp+2]; const float y2 = input_boxes[bp+3]; cls_box[bbp] = x1; cls_box[bbp+1] = y1; cls_box[bbp+2] = x2; cls_box[bbp+3] = y2; cls_box[bbp+4] = input_boxes[bp+4]; cls_box[bbp+5] = (x2 - x1 + 1.) * (y2 - y1 + 1.); cls_box[bbp+6] = cls_f; // leave a dimension to encode suppressed information cls_box[bbp+7] = 0.; _copy(num_feat, input_feats + fp, cls_feat + ffp); } } } } // namespace template<> bool ClassBasedBoxesSingleOp<float, CUDAContext>::RunOnDevice() { auto& stats = Input(0); const int num_cls = stats.dim32(0); DCHECK_LT(class_, num_cls); auto& boxes = Input(1); const int num_total = boxes.dim32(0); DCHECK_EQ(boxes.dim32(1), 7); auto& feats = Input(2); DCHECK_EQ(feats.dim32(0), num_total); const int num_feat = feats.dim32(1); // get the statistics to cpu for the current class int current_cls_count; const int* stats_pointer = stats.data<int>(); context_.Copy<int, CUDAContext, CPUContext>(1, stats_pointer + class_, &current_cls_count); // use the stats to initialize class based tensors auto* cls_boxes = Output(0); cls_boxes->Resize(current_cls_count, 8); auto* cls_feats = Output(1); cls_feats->Resize(current_cls_count, num_feat); float* cls_boxes_pointer = cls_boxes->mutable_data<float>(); float* cls_feats_pointer = cls_feats->mutable_data<float>(); counter_pointer_ = counter_.mutable_data<int>(); if (current_cls_count > 0) { // reset counter to zero math::Set<int, CUDAContext>(1, 0, counter_pointer_, &context_); // now copy things to different classes hipLaunchKernelGGL(( ClassBasedBoxesSingleForward), dim3(CAFFE_GET_BLOCKS(num_total)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_total, boxes.data<float>(), feats.data<float>(), num_feat, class_float_, cls_boxes_pointer, cls_feats_pointer, counter_pointer_); } // otherwise we do not need to do anything! return true; } REGISTER_CUDA_OPERATOR(ClassBasedBoxesSingle, ClassBasedBoxesSingleOp<float, CUDAContext>); } // namespace caffe2
3314223527d484d8601b8d4e4bc17d396c97484f.cu
#include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "class_based_boxes_single_op.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ int gpu_atomic_add(const int val, int* address) { return atomicAdd(address, val); } __device__ void _copy(const int size, const float* source, float* target) { for (int i=0; i<size; i++) { *(target++) = *(source++); } } __global__ void ClassBasedBoxesSingleForward(const int nthreads, const float* input_boxes, const float* input_feats, const int num_feat, const float cls_float, float* cls_box, float* cls_feat, int* counter) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int bp = i * 7; const float cls_f = input_boxes[bp+5]; const float valid = input_boxes[bp+6]; if (cls_f == cls_float && valid > 0.) { const int fp = i * num_feat; // returns the old index const int idx = gpu_atomic_add(1, counter); const int bbp = idx * 8; const int ffp = idx * num_feat; const float x1 = input_boxes[bp]; const float y1 = input_boxes[bp+1]; const float x2 = input_boxes[bp+2]; const float y2 = input_boxes[bp+3]; cls_box[bbp] = x1; cls_box[bbp+1] = y1; cls_box[bbp+2] = x2; cls_box[bbp+3] = y2; cls_box[bbp+4] = input_boxes[bp+4]; cls_box[bbp+5] = (x2 - x1 + 1.) * (y2 - y1 + 1.); cls_box[bbp+6] = cls_f; // leave a dimension to encode suppressed information cls_box[bbp+7] = 0.; _copy(num_feat, input_feats + fp, cls_feat + ffp); } } } } // namespace template<> bool ClassBasedBoxesSingleOp<float, CUDAContext>::RunOnDevice() { auto& stats = Input(0); const int num_cls = stats.dim32(0); DCHECK_LT(class_, num_cls); auto& boxes = Input(1); const int num_total = boxes.dim32(0); DCHECK_EQ(boxes.dim32(1), 7); auto& feats = Input(2); DCHECK_EQ(feats.dim32(0), num_total); const int num_feat = feats.dim32(1); // get the statistics to cpu for the current class int current_cls_count; const int* stats_pointer = stats.data<int>(); context_.Copy<int, CUDAContext, CPUContext>(1, stats_pointer + class_, &current_cls_count); // use the stats to initialize class based tensors auto* cls_boxes = Output(0); cls_boxes->Resize(current_cls_count, 8); auto* cls_feats = Output(1); cls_feats->Resize(current_cls_count, num_feat); float* cls_boxes_pointer = cls_boxes->mutable_data<float>(); float* cls_feats_pointer = cls_feats->mutable_data<float>(); counter_pointer_ = counter_.mutable_data<int>(); if (current_cls_count > 0) { // reset counter to zero math::Set<int, CUDAContext>(1, 0, counter_pointer_, &context_); // now copy things to different classes ClassBasedBoxesSingleForward<<<CAFFE_GET_BLOCKS(num_total), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(num_total, boxes.data<float>(), feats.data<float>(), num_feat, class_float_, cls_boxes_pointer, cls_feats_pointer, counter_pointer_); } // otherwise we do not need to do anything! return true; } REGISTER_CUDA_OPERATOR(ClassBasedBoxesSingle, ClassBasedBoxesSingleOp<float, CUDAContext>); } // namespace caffe2
8adb01ecddef7620aaf66daa1323b7cfdf47f777.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef USE_MQX
#include "mqx.h"
#endif

#define TVAL(t) ((t).tv_sec * 1000.0 + (t).tv_usec / 1000.0)
#define TDIFF(t1, t2) (TVAL(t2) - TVAL(t1))

#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(call) \
    do { \
        hipError_t err = call; \
        if(hipSuccess != err) { \
            fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
                __FILE__, __LINE__, hipGetErrorString(err)); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
#endif

#define BLOCK_SIZE 16
#define WIDTH (BLOCK_SIZE * 128)
#define HEIGHT WIDTH

// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
    for (int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

//float multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *A, float *B, float *C)
{
    // Each thread computes one element of C by accumulating results into Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < WIDTH; ++e)
        Cvalue += A[row * WIDTH + e] * B[e * WIDTH + col];
    C[row * WIDTH + col] = Cvalue;
}

// float multiplication - Host code
// float dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const float *A, const float *B, float *C)
{
    size_t size = WIDTH * HEIGHT * sizeof(float);
    float *d_A, *d_B, *d_C;
    struct timeval t1, t2;

    gettimeofday(&t1, NULL);

    // Load A and B to device memory
    CUDA_SAFE_CALL(hipMalloc((void**)&d_A, size));
    CUDA_SAFE_CALL(hipMemcpy(d_A, A, size, hipMemcpyHostToDevice));
    CUDA_SAFE_CALL(hipMalloc((void**)&d_B, size));
    CUDA_SAFE_CALL(hipMemcpy(d_B, B, size, hipMemcpyHostToDevice));

    // Allocate C in device memory
    CUDA_SAFE_CALL(hipMalloc((void**)&d_C, size));

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(WIDTH / dimBlock.x, HEIGHT / dimBlock.y);
#ifdef USE_MQX
    CUDA_SAFE_CALL(cudaAdvise(0, CADV_INPUT));
    CUDA_SAFE_CALL(cudaAdvise(1, CADV_INPUT));
    CUDA_SAFE_CALL(cudaAdvise(2, CADV_OUTPUT));
#endif
    hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
    CUDA_SAFE_CALL(hipDeviceSynchronize());

    // Read C from device memory
    hipMemcpy(C, d_C, size,hipMemcpyDeviceToHost);

    // Free device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    gettimeofday(&t2, NULL);
    printf("Matrix multiplication took %f ms\n", TDIFF(t1, t2));
}

int main(int argc, char* argv[])
{
    unsigned int size = WIDTH*HEIGHT;
    unsigned int mem_size = sizeof(float) * size;
    float *h_A, *h_B, *h_C;

    // Allocate host memory for matrices A and B
    h_A = (float*)malloc(mem_size);
    h_B = (float*)malloc(mem_size);
    h_C = (float*)malloc(mem_size);

    // set seed for rand()
    srand(2014);

    // initialize host memory
    randomInit(h_A, size);
    randomInit(h_B, size);

    //invoke MatMul
    MatMul(h_A,h_B,h_C);

    free(h_C);
    free(h_B);
    free(h_A);

    return 0;
}
8adb01ecddef7620aaf66daa1323b7cfdf47f777.cu
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef USE_MQX
#include "mqx.h"
#endif

#define TVAL(t) ((t).tv_sec * 1000.0 + (t).tv_usec / 1000.0)
#define TDIFF(t1, t2) (TVAL(t2) - TVAL(t1))

#ifndef CUDA_SAFE_CALL
#define CUDA_SAFE_CALL(call) \
    do { \
        cudaError_t err = call; \
        if(cudaSuccess != err) { \
            fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
                __FILE__, __LINE__, cudaGetErrorString(err)); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
#endif

#define BLOCK_SIZE 16
#define WIDTH (BLOCK_SIZE * 128)
#define HEIGHT WIDTH

// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
    for (int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

//float multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *A, float *B, float *C)
{
    // Each thread computes one element of C by accumulating results into Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < WIDTH; ++e)
        Cvalue += A[row * WIDTH + e] * B[e * WIDTH + col];
    C[row * WIDTH + col] = Cvalue;
}

// float multiplication - Host code
// float dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const float *A, const float *B, float *C)
{
    size_t size = WIDTH * HEIGHT * sizeof(float);
    float *d_A, *d_B, *d_C;
    struct timeval t1, t2;

    gettimeofday(&t1, NULL);

    // Load A and B to device memory
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_A, size));
    CUDA_SAFE_CALL(cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_B, size));
    CUDA_SAFE_CALL(cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice));

    // Allocate C in device memory
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_C, size));

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(WIDTH / dimBlock.x, HEIGHT / dimBlock.y);
#ifdef USE_MQX
    CUDA_SAFE_CALL(cudaAdvise(0, CADV_INPUT));
    CUDA_SAFE_CALL(cudaAdvise(1, CADV_INPUT));
    CUDA_SAFE_CALL(cudaAdvise(2, CADV_OUTPUT));
#endif
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    CUDA_SAFE_CALL(cudaThreadSynchronize());

    // Read C from device memory
    cudaMemcpy(C, d_C, size,cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    gettimeofday(&t2, NULL);
    printf("Matrix multiplication took %f ms\n", TDIFF(t1, t2));
}

int main(int argc, char* argv[])
{
    unsigned int size = WIDTH*HEIGHT;
    unsigned int mem_size = sizeof(float) * size;
    float *h_A, *h_B, *h_C;

    // Allocate host memory for matrices A and B
    h_A = (float*)malloc(mem_size);
    h_B = (float*)malloc(mem_size);
    h_C = (float*)malloc(mem_size);

    // set seed for rand()
    srand(2014);

    // initialize host memory
    randomInit(h_A, size);
    randomInit(h_B, size);

    //invoke MatMul
    MatMul(h_A,h_B,h_C);

    free(h_C);
    free(h_B);
    free(h_A);

    return 0;
}
9f21da1c802ac53a19fefbebf0960dbf5397aab7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* | * MCQUAD - CUDA MONTE CARLO INTEGRATOR | *________________________________________________________________| * * Performs a Monte Carlo integration of the form * \int_{0}^{\inf} \exp{-x} g(x) dx for g(x) = ln(x) * * Draws N samples for each integration according to the density * -> g(x). * * INPUT PARAMETERS: * ntrials (total nuber of trials), * nsamps (samples taken per trial) * * Written by Brandon B. Miller */ #include <stdlib.h> #include <stdio.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <math.h> __global__ void setup_kernel(int N, long int seed, hiprandState_t *state) { // Set up the RNG for each sample thread int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < N) { // Each RNG state is different // It augomatically increments itself hiprand_init(seed, id, 0, &state[id]); } } __global__ void do_trials(int ntrials, int nsamps, double* results, hiprandState_t *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; // We will have each thread do a trial. So we need to launch // Ntrials blocks each with one thread. if (id < ntrials) { double sum = 0; for (int sample = 0; sample < nsamps; sample++) { sum += cos(-log(hiprand_uniform_double(&state[id]))); } results[id] = sum / nsamps; // Answer } } static void HandleError (hipError_t err, const char* file, int line) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line); exit(1); } } #define HANDLE_ERROR( err ) (HandleError(err, __FILE__, __LINE__)) int main(int argc, char* argv[]) { // Initial Machinery to select the GPU // ___________________________________ hipDeviceProp_t prop; // This is a blank struct at this point int dev; memset(&prop, 0, sizeof(hipDeviceProp_t)); // Initialize the struct prop.multiProcessorCount = 13; hipChooseDevice(&dev, &prop); hipSetDevice(dev); hipGetDeviceProperties(&prop, dev); // ___________________________________ // Initial Machinery to read in params // __________________________________ float tym; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop ); hipEventRecord(start, 0); int nsamps; int ntrials; FILE* inputfile; FILE* outputfile; if (argc != 3) { printf("Incorrect usage: only enter the input and output filenames\n"); return 0; } inputfile = fopen(argv[1], "r"); if (!inputfile) { printf("Unable to open input file \n"); return 0; } fscanf(inputfile, "%d", &nsamps); fscanf(inputfile, "%d", &ntrials); // __________________________________ double* results = (double *)malloc(ntrials * sizeof(double)); // Random number generation hiprandState_t* dev_states; double* dev_results; // will contain final random numbers if ( hipSuccess != hipMalloc((void**)&dev_results, ntrials*sizeof(double)) ) { printf("hipMalloc Failed..."); exit(1); } // THERE IS NOW AN NTRIALS LENGTH ARRAY IN GLOBAL MEM ON THE DEVICE if ( hipSuccess != hipMalloc((void**)&dev_states, ntrials*sizeof(hiprandState_t)) ) { printf("hipMalloc Failed..."); exit(1); } // dev_states is an array containing an RNG state to be used for each trial // We will index into it uniquely based on thread and blockID within the kernel hipLaunchKernelGGL(( setup_kernel), dim3(ntrials), dim3(1), 0, 0, nsamps, time(NULL), dev_states); // FIXME - Launch a block for each trial with one thread each - SLOW hipLaunchKernelGGL(( do_trials), dim3(ntrials), dim3(1), 0, 0, ntrials, nsamps, dev_results, dev_states); // Retrieve results hipMemcpy(results, dev_results, 
ntrials*sizeof(double), hipMemcpyDeviceToHost); outputfile = fopen(argv[2], "wb"); fwrite(results, sizeof(double), ntrials, outputfile); // Closing machinery hipFree(dev_results); hipFree(dev_states); free(results); fclose(outputfile); fclose(inputfile); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&tym, start, stop); printf("Elapsed time %3.1f milliseconds", tym); return 0; }
9f21da1c802ac53a19fefbebf0960dbf5397aab7.cu
/* | * MCQUAD - CUDA MONTE CARLO INTEGRATOR | *________________________________________________________________| * * Performs a Monte Carlo integration of the form * \int_{0}^{\inf} \exp{-x} g(x) dx for g(x) = ln(x) * * Draws N samples for each integration according to the density * -> g(x). * * INPUT PARAMETERS: * ntrials (total nuber of trials), * nsamps (samples taken per trial) * * Written by Brandon B. Miller */ #include <stdlib.h> #include <stdio.h> #include <curand.h> #include <curand_kernel.h> #include <math.h> __global__ void setup_kernel(int N, long int seed, curandState_t *state) { // Set up the RNG for each sample thread int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < N) { // Each RNG state is different // It augomatically increments itself curand_init(seed, id, 0, &state[id]); } } __global__ void do_trials(int ntrials, int nsamps, double* results, curandState_t *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; // We will have each thread do a trial. So we need to launch // Ntrials blocks each with one thread. if (id < ntrials) { double sum = 0; for (int sample = 0; sample < nsamps; sample++) { sum += cos(-log(curand_uniform_double(&state[id]))); } results[id] = sum / nsamps; // Answer } } static void HandleError (cudaError_t err, const char* file, int line) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line); exit(1); } } #define HANDLE_ERROR( err ) (HandleError(err, __FILE__, __LINE__)) int main(int argc, char* argv[]) { // Initial Machinery to select the GPU // ___________________________________ cudaDeviceProp prop; // This is a blank struct at this point int dev; memset(&prop, 0, sizeof(cudaDeviceProp)); // Initialize the struct prop.multiProcessorCount = 13; cudaChooseDevice(&dev, &prop); cudaSetDevice(dev); cudaGetDeviceProperties(&prop, dev); // ___________________________________ // Initial Machinery to read in params // __________________________________ float tym; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop ); cudaEventRecord(start, 0); int nsamps; int ntrials; FILE* inputfile; FILE* outputfile; if (argc != 3) { printf("Incorrect usage: only enter the input and output filenames\n"); return 0; } inputfile = fopen(argv[1], "r"); if (!inputfile) { printf("Unable to open input file \n"); return 0; } fscanf(inputfile, "%d", &nsamps); fscanf(inputfile, "%d", &ntrials); // __________________________________ double* results = (double *)malloc(ntrials * sizeof(double)); // Random number generation curandState_t* dev_states; double* dev_results; // will contain final random numbers if ( cudaSuccess != cudaMalloc((void**)&dev_results, ntrials*sizeof(double)) ) { printf("cudaMalloc Failed..."); exit(1); } // THERE IS NOW AN NTRIALS LENGTH ARRAY IN GLOBAL MEM ON THE DEVICE if ( cudaSuccess != cudaMalloc((void**)&dev_states, ntrials*sizeof(curandState_t)) ) { printf("cudaMalloc Failed..."); exit(1); } // dev_states is an array containing an RNG state to be used for each trial // We will index into it uniquely based on thread and blockID within the kernel setup_kernel<<<ntrials, 1>>>(nsamps, time(NULL), dev_states); // FIXME - Launch a block for each trial with one thread each - SLOW do_trials<<<ntrials, 1>>>(ntrials, nsamps, dev_results, dev_states); // Retrieve results cudaMemcpy(results, dev_results, ntrials*sizeof(double), cudaMemcpyDeviceToHost); outputfile = fopen(argv[2], "wb"); fwrite(results, sizeof(double), ntrials, outputfile); // Closing machinery cudaFree(dev_results); 
cudaFree(dev_states); free(results); fclose(outputfile); fclose(inputfile); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&tym, start, stop); printf("Elapsed time %3.1f milliseconds", tym); return 0; }
98933bd8ed7e5520afea0f711a1da3bad9f1a99e.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/copy_range.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/strings/detail/copy_range.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <thrust/iterator/constant_iterator.h> #include <hip/hip_runtime.h> #include <memory> namespace { template <typename T> void in_place_copy_range(cudf::column_view const& source, cudf::mutable_column_view& target, cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, hipStream_t stream = 0) { auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { cudf::detail::copy_range( cudf::detail::make_null_replacement_iterator<T>(*p_source_device_view, T()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, target, target_begin, target_begin + (source_end - source_begin), stream); } else { cudf::detail::copy_range(p_source_device_view->begin<T>() + source_begin, thrust::make_constant_iterator(true), // dummy target, target_begin, target_begin + (source_end - source_begin), stream); } } struct in_place_copy_range_dispatch { cudf::column_view const& source; cudf::mutable_column_view& target; template <typename T> std::enable_if_t<cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, hipStream_t stream = 0) { in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream); } template <typename T> std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, hipStream_t stream = 0) { CUDF_FAIL("in-place copy does not work for variable width types."); } }; struct out_of_place_copy_range_dispatch { cudf::column_view const& source; cudf::column_view const& target; template <typename T> std::unique_ptr<cudf::column> operator()( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), hipStream_t stream = 0) { auto p_ret = std::make_unique<cudf::column>(target, stream, mr); if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) { p_ret->set_null_mask( cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0); } if (source_end != source_begin) { // otherwise no-op 
auto ret_view = p_ret->mutable_view(); in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream); } return p_ret; } }; template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::string_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto target_end = target_begin + (source_end - source_begin); auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { return cudf::strings::detail::copy_range( cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view, cudf::string_view()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, cudf::strings_column_view(target), target_begin, target_end, mr, stream); } else { return cudf::strings::detail::copy_range( p_source_device_view->begin<cudf::string_view>() + source_begin, thrust::make_constant_iterator(true), cudf::strings_column_view(target), target_begin, target_end, mr, stream); } } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { // check the keys in the source and target cudf::dictionary_column_view const dict_source(source); cudf::dictionary_column_view const dict_target(target); CUDF_EXPECTS(dict_source.keys().type() == dict_target.keys().type(), "dictionary keys must be the same type"); // combine keys so both dictionaries have the same set auto target_matched = cudf::dictionary::detail::add_keys(dict_target, dict_source.keys(), mr, stream); auto const target_view = cudf::dictionary_column_view(target_matched->view()); auto source_matched = cudf::dictionary::detail::set_keys( dict_source, target_view.keys(), rmm::mr::get_current_device_resource(), stream); auto const source_view = cudf::dictionary_column_view(source_matched->view()); // build the new indices by calling in_place_copy_range on just the indices auto const source_indices = source_view.get_indices_annotated(); auto target_contents = target_matched->release(); auto target_indices(std::move(target_contents.children.front())); cudf::mutable_column_view new_indices( target_indices->type(), dict_target.size(), target_indices->mutable_view().head(), static_cast<cudf::bitmask_type*>(target_contents.null_mask->data()), dict_target.null_count()); cudf::type_dispatcher(new_indices.type(), in_place_copy_range_dispatch{source_indices, new_indices}, source_begin, source_end, target_begin, stream); auto null_count = new_indices.null_count(); auto indices_column = std::make_unique<cudf::column>(new_indices.type(), new_indices.size(), std::move(*(target_indices->release().data.release())), rmm::device_buffer{0, stream, mr}, 0); // take the keys from the matched column allocated using mr auto keys_column(std::move(target_contents.children.back())); // create column with keys_column and indices_column return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(target_contents.null_mask.release())), null_count); } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::list_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("list_view type 
not supported"); } } // namespace namespace cudf { namespace detail { void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin, hipStream_t stream) { CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true, "In-place copy_range does not support variable-sized types."); CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false), "target should be nullable if source has null values."); if (source_end != source_begin) { // otherwise no-op cudf::type_dispatcher(target.type(), in_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, stream); } } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); return cudf::type_dispatcher(target.type(), out_of_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, mr, stream); } } // namespace detail void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin) { CUDF_FUNC_RANGE(); return detail::copy_range_in_place(source, target, source_begin, source_end, target_begin, 0); } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::copy_range(source, target, source_begin, source_end, target_begin, mr, 0); } } // namespace cudf
98933bd8ed7e5520afea0f711a1da3bad9f1a99e.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/copy_range.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/strings/detail/copy_range.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <thrust/iterator/constant_iterator.h> #include <cuda_runtime.h> #include <memory> namespace { template <typename T> void in_place_copy_range(cudf::column_view const& source, cudf::mutable_column_view& target, cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, cudaStream_t stream = 0) { auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { cudf::detail::copy_range( cudf::detail::make_null_replacement_iterator<T>(*p_source_device_view, T()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, target, target_begin, target_begin + (source_end - source_begin), stream); } else { cudf::detail::copy_range(p_source_device_view->begin<T>() + source_begin, thrust::make_constant_iterator(true), // dummy target, target_begin, target_begin + (source_end - source_begin), stream); } } struct in_place_copy_range_dispatch { cudf::column_view const& source; cudf::mutable_column_view& target; template <typename T> std::enable_if_t<cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, cudaStream_t stream = 0) { in_place_copy_range<T>(source, target, source_begin, source_end, target_begin, stream); } template <typename T> std::enable_if_t<not cudf::is_fixed_width<T>(), void> operator()(cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, cudaStream_t stream = 0) { CUDF_FAIL("in-place copy does not work for variable width types."); } }; struct out_of_place_copy_range_dispatch { cudf::column_view const& source; cudf::column_view const& target; template <typename T> std::unique_ptr<cudf::column> operator()( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource(), cudaStream_t stream = 0) { auto p_ret = std::make_unique<cudf::column>(target, stream, mr); if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) { p_ret->set_null_mask( cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0); } if (source_end != source_begin) { // otherwise no-op auto ret_view = p_ret->mutable_view(); 
in_place_copy_range<T>(source, ret_view, source_begin, source_end, target_begin, stream); } return p_ret; } }; template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::string_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto target_end = target_begin + (source_end - source_begin); auto p_source_device_view = cudf::column_device_view::create(source, stream); if (source.has_nulls()) { return cudf::strings::detail::copy_range( cudf::detail::make_null_replacement_iterator<cudf::string_view>(*p_source_device_view, cudf::string_view()) + source_begin, cudf::detail::make_validity_iterator(*p_source_device_view) + source_begin, cudf::strings_column_view(target), target_begin, target_end, mr, stream); } else { return cudf::strings::detail::copy_range( p_source_device_view->begin<cudf::string_view>() + source_begin, thrust::make_constant_iterator(true), cudf::strings_column_view(target), target_begin, target_end, mr, stream); } } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::dictionary32>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { // check the keys in the source and target cudf::dictionary_column_view const dict_source(source); cudf::dictionary_column_view const dict_target(target); CUDF_EXPECTS(dict_source.keys().type() == dict_target.keys().type(), "dictionary keys must be the same type"); // combine keys so both dictionaries have the same set auto target_matched = cudf::dictionary::detail::add_keys(dict_target, dict_source.keys(), mr, stream); auto const target_view = cudf::dictionary_column_view(target_matched->view()); auto source_matched = cudf::dictionary::detail::set_keys( dict_source, target_view.keys(), rmm::mr::get_current_device_resource(), stream); auto const source_view = cudf::dictionary_column_view(source_matched->view()); // build the new indices by calling in_place_copy_range on just the indices auto const source_indices = source_view.get_indices_annotated(); auto target_contents = target_matched->release(); auto target_indices(std::move(target_contents.children.front())); cudf::mutable_column_view new_indices( target_indices->type(), dict_target.size(), target_indices->mutable_view().head(), static_cast<cudf::bitmask_type*>(target_contents.null_mask->data()), dict_target.null_count()); cudf::type_dispatcher(new_indices.type(), in_place_copy_range_dispatch{source_indices, new_indices}, source_begin, source_end, target_begin, stream); auto null_count = new_indices.null_count(); auto indices_column = std::make_unique<cudf::column>(new_indices.type(), new_indices.size(), std::move(*(target_indices->release().data.release())), rmm::device_buffer{0, stream, mr}, 0); // take the keys from the matched column allocated using mr auto keys_column(std::move(target_contents.children.back())); // create column with keys_column and indices_column return make_dictionary_column(std::move(keys_column), std::move(indices_column), std::move(*(target_contents.null_mask.release())), null_count); } template <> std::unique_ptr<cudf::column> out_of_place_copy_range_dispatch::operator()<cudf::list_view>( cudf::size_type source_begin, cudf::size_type source_end, cudf::size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("list_view type not supported"); } } // namespace 
namespace cudf { namespace detail { void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin, cudaStream_t stream) { CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true, "In-place copy_range does not support variable-sized types."); CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false), "target should be nullable if source has null values."); if (source_end != source_begin) { // otherwise no-op cudf::type_dispatcher(target.type(), in_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, stream); } } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS((source_begin >= 0) && (source_end <= source.size()) && (source_begin <= source_end) && (target_begin >= 0) && (target_begin <= target.size() - (source_end - source_begin)), "Range is out of bounds."); CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); return cudf::type_dispatcher(target.type(), out_of_place_copy_range_dispatch{source, target}, source_begin, source_end, target_begin, mr, stream); } } // namespace detail void copy_range_in_place(column_view const& source, mutable_column_view& target, size_type source_begin, size_type source_end, size_type target_begin) { CUDF_FUNC_RANGE(); return detail::copy_range_in_place(source, target, source_begin, source_end, target_begin, 0); } std::unique_ptr<column> copy_range(column_view const& source, column_view const& target, size_type source_begin, size_type source_end, size_type target_begin, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::copy_range(source, target, source_begin, source_end, target_begin, mr, 0); } } // namespace cudf
4e24a592f7ac95ac45c5dd2fd158da5d20aa700f.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/visualization/shader/normal_shader.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/utility/platform.h" #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; const auto &vertex = vertices_[vi]; return (shade_option_ == RenderOption::MeshShadeOption::FlatShade) ? thrust::make_tuple(vertex, triangle_normals_[i]) : thrust::make_tuple(vertex, vertex_normals_[vi]); } }; } bool NormalShader::Compile() { if (CompileShaders(normal_vertex_shader, NULL, normal_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); return true; } void NormalShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool NormalShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool NormalShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); return true; } void NormalShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); bound_ = false; } } bool NormalShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPointSize(GLfloat(option.point_size_)); return true; } bool NormalShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> 
&points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } if (pointcloud.HasNormals() == false) { PrintShaderWarning("Binding failed with pointcloud with no normals."); return false; } thrust::copy(pointcloud.points_.begin(), pointcloud.points_.end(), points); thrust::copy(pointcloud.normals_.begin(), pointcloud.normals_.end(), normals); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t NormalShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool NormalShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool NormalShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t NormalShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
4e24a592f7ac95ac45c5dd2fd158da5d20aa700f.cu
#include "cupoch/visualization/shader/normal_shader.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/utility/platform.h" #include <cuda_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; const auto &vertex = vertices_[vi]; return (shade_option_ == RenderOption::MeshShadeOption::FlatShade) ? thrust::make_tuple(vertex, triangle_normals_[i]) : thrust::make_tuple(vertex, vertex_normals_[vi]); } }; } bool NormalShader::Compile() { if (CompileShaders(normal_vertex_shader, NULL, normal_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); return true; } void NormalShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool NormalShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. 
UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(2, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(2); bound_ = true; return true; } bool NormalShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); return true; } void NormalShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[1])); } glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); bound_ = false; } } bool NormalShaderForPointCloud::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPointSize(GLfloat(option.point_size_)); return true; } bool NormalShaderForPointCloud::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, 
thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::PointCloud) { PrintShaderWarning("Rendering type is not geometry::PointCloud."); return false; } const geometry::PointCloud &pointcloud = (const geometry::PointCloud &)geometry; if (pointcloud.HasPoints() == false) { PrintShaderWarning("Binding failed with empty pointcloud."); return false; } if (pointcloud.HasNormals() == false) { PrintShaderWarning("Binding failed with pointcloud with no normals."); return false; } thrust::copy(pointcloud.points_.begin(), pointcloud.points_.end(), points); thrust::copy(pointcloud.normals_.begin(), pointcloud.normals_.end(), normals); draw_arrays_mode_ = GL_POINTS; draw_arrays_size_ = GLsizei(pointcloud.points_.size()); return true; } size_t NormalShaderForPointCloud::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::PointCloud &)geometry).points_.size(); } bool NormalShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } return true; } bool NormalShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals), func); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t NormalShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; }
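Both PrepareBinding paths above write straight into OpenGL vertex buffers through the CUDA-GL interop mapping that BindGeometry sets up, and the sequence is easy to lose in the shader plumbing. Below is a minimal stand-alone sketch of just that sequence for a single buffer: register the GL buffer, map it, fetch the mapped device pointer, wrap it for thrust, write, unmap. The function name is hypothetical, and it assumes a current GL context and an existing GL_ARRAY_BUFFER holding at least n float3 elements.

#include <cstddef>
#include <cuda_gl_interop.h>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>

// One-shot fill of a GL vertex buffer from the device via graphics interop.
void fill_vbo_with_origin(GLuint vbo, std::size_t n)
{
    cudaGraphicsResource* res = nullptr;
    cudaGraphicsGLRegisterBuffer(&res, vbo, cudaGraphicsMapFlagsNone);

    float3* raw = nullptr;
    std::size_t n_bytes = 0;
    cudaGraphicsMapResources(1, &res);
    cudaGraphicsResourceGetMappedPointer((void**)&raw, &n_bytes, res);

    // Wrap the mapped pointer so thrust algorithms can write into the VBO
    // directly, as PrepareBinding does with dev_points_ptr / dev_normals_ptr.
    thrust::device_ptr<float3> dev = thrust::device_pointer_cast(raw);
    thrust::fill(dev, dev + n, make_float3(0.f, 0.f, 0.f));

    cudaGraphicsUnmapResources(1, &res);
    cudaGraphicsUnregisterResource(res);
}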
83fb60e7c019efb3391822b023865a9afd6a5918.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/stop/criterion_kernels.hpp" #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/stop/stopping_status.hpp> #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The Set all statuses namespace. * @ref set_status * @ingroup set_all_statuses */ namespace set_all_statuses { constexpr int default_block_size = 512; __global__ __launch_bounds__(default_block_size) void set_all_statuses( size_type num_elems, uint8 stoppingId, bool setFinalized, stopping_status* stop_status) { const auto tidx = thread::get_thread_id_flat(); if (tidx < num_elems) { stop_status[tidx].stop(stoppingId, setFinalized); } } void set_all_statuses(std::shared_ptr<const CudaExecutor> exec, uint8 stoppingId, bool setFinalized, array<stopping_status>* stop_status) { const auto block_size = default_block_size; const auto grid_size = ceildiv(stop_status->get_num_elems(), block_size); if (grid_size > 0) { hipLaunchKernelGGL(( set_all_statuses), dim3(grid_size), dim3(block_size), 0, 0, stop_status->get_num_elems(), stoppingId, setFinalized, as_cuda_type(stop_status->get_data())); } } } // namespace set_all_statuses } // namespace cuda } // namespace kernels } // namespace gko
83fb60e7c019efb3391822b023865a9afd6a5918.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2022, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/stop/criterion_kernels.hpp" #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/stop/stopping_status.hpp> #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/components/thread_ids.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The Set all statuses namespace. * @ref set_status * @ingroup set_all_statuses */ namespace set_all_statuses { constexpr int default_block_size = 512; __global__ __launch_bounds__(default_block_size) void set_all_statuses( size_type num_elems, uint8 stoppingId, bool setFinalized, stopping_status* stop_status) { const auto tidx = thread::get_thread_id_flat(); if (tidx < num_elems) { stop_status[tidx].stop(stoppingId, setFinalized); } } void set_all_statuses(std::shared_ptr<const CudaExecutor> exec, uint8 stoppingId, bool setFinalized, array<stopping_status>* stop_status) { const auto block_size = default_block_size; const auto grid_size = ceildiv(stop_status->get_num_elems(), block_size); if (grid_size > 0) { set_all_statuses<<<grid_size, block_size, 0, 0>>>( stop_status->get_num_elems(), stoppingId, setFinalized, as_cuda_type(stop_status->get_data())); } } } // namespace set_all_statuses } // namespace cuda } // namespace kernels } // namespace gko
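The launch logic in set_all_statuses above follows a common pattern: fix a block size, compute the grid with a ceiling division, and skip the launch entirely when the array is empty, since a zero-sized grid is an invalid launch configuration. A minimal self-contained sketch of the same pattern; the kernel and names below are made up for illustration, not Ginkgo API.

#include <cstddef>
#include <cuda_runtime.h>

constexpr int default_block_size = 512;

// Elementwise fill with the same flat-index + bounds-check structure as
// set_all_statuses above.
__global__ void fill_kernel(std::size_t num_elems, int value, int* data)
{
    const std::size_t tidx =
        static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (tidx < num_elems) { data[tidx] = value; }
}

void fill(std::size_t num_elems, int value, int* data)
{
    const auto block_size = default_block_size;
    const auto grid_size = (num_elems + block_size - 1) / block_size;  // ceildiv
    if (grid_size > 0) {  // avoid launching an empty (invalid) grid
        fill_kernel<<<grid_size, block_size>>>(num_elems, value, data);
    }
}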
7a45c7a02adf146cccd185ffbddb6749c7cfa2ed.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "row_filter.h" namespace filter { template void linearRow<int3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif /* CUDA_DISABLER */
7a45c7a02adf146cccd185ffbddb6749c7cfa2ed.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "row_filter.h" namespace filter { template void linearRow<int3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif /* CUDA_DISABLER */
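The pair above is essentially a one-line translation unit: the templated row filter is implemented in row_filter.h and each .cu/.hip file emits a single explicit instantiation (here int3 to float3), which keeps individual compiler invocations small and lets the many type combinations build in parallel. A small self-contained illustration of the same pattern with made-up names:

#include <cuda_runtime.h>

template <typename SrcT, typename DstT>
__global__ void scale_row(const SrcT* src, DstT* dst, int n, float k)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = static_cast<DstT>(src[i] * k);
}

template <typename SrcT, typename DstT>
void launch_scale_row(const SrcT* src, DstT* dst, int n, float k, cudaStream_t stream)
{
    scale_row<SrcT, DstT><<<(n + 255) / 256, 256, 0, stream>>>(src, dst, n, k);
}

// One explicit instantiation per translation unit, mirroring
// `template void linearRow<int3, float3>(...)` above.
template void launch_scale_row<int, float>(const int*, float*, int, float, cudaStream_t);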
b7a18e3448497384975b06d2bef27b7be02047c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <read_gauge.h> #include <gauge_quda.h> #include "gauge_force_quda.h" #define MULT_SU3_NN_TEST(ma, mb) do{ \ float fa_re,fa_im, fb_re, fb_im, fc_re, fc_im; \ fa_re = \ ma##00_re * mb##00_re - ma##00_im * mb##00_im + \ ma##01_re * mb##10_re - ma##01_im * mb##10_im + \ ma##02_re * mb##20_re - ma##02_im * mb##20_im; \ fa_im = \ ma##00_re * mb##00_im + ma##00_im * mb##00_re + \ ma##01_re * mb##10_im + ma##01_im * mb##10_re + \ ma##02_re * mb##20_im + ma##02_im * mb##20_re; \ fb_re = \ ma##00_re * mb##01_re - ma##00_im * mb##01_im + \ ma##01_re * mb##11_re - ma##01_im * mb##11_im + \ ma##02_re * mb##21_re - ma##02_im * mb##21_im; \ fb_im = \ ma##00_re * mb##01_im + ma##00_im * mb##01_re + \ ma##01_re * mb##11_im + ma##01_im * mb##11_re + \ ma##02_re * mb##21_im + ma##02_im * mb##21_re; \ fc_re = \ ma##00_re * mb##02_re - ma##00_im * mb##02_im + \ ma##01_re * mb##12_re - ma##01_im * mb##12_im + \ ma##02_re * mb##22_re - ma##02_im * mb##22_im; \ fc_im = \ ma##00_re * mb##02_im + ma##00_im * mb##02_re + \ ma##01_re * mb##12_im + ma##01_im * mb##12_re + \ ma##02_re * mb##22_im + ma##02_im * mb##22_re; \ ma##00_re = fa_re; \ ma##00_im = fa_im; \ ma##01_re = fb_re; \ ma##01_im = fb_im; \ ma##02_re = fc_re; \ ma##02_im = fc_im; \ fa_re = \ ma##10_re * mb##00_re - ma##10_im * mb##00_im + \ ma##11_re * mb##10_re - ma##11_im * mb##10_im + \ ma##12_re * mb##20_re - ma##12_im * mb##20_im; \ fa_im = \ ma##10_re * mb##00_im + ma##10_im * mb##00_re + \ ma##11_re * mb##10_im + ma##11_im * mb##10_re + \ ma##12_re * mb##20_im + ma##12_im * mb##20_re; \ fb_re = \ ma##10_re * mb##01_re - ma##10_im * mb##01_im + \ ma##11_re * mb##11_re - ma##11_im * mb##11_im + \ ma##12_re * mb##21_re - ma##12_im * mb##21_im; \ fb_im = \ ma##10_re * mb##01_im + ma##10_im * mb##01_re + \ ma##11_re * mb##11_im + ma##11_im * mb##11_re + \ ma##12_re * mb##21_im + ma##12_im * mb##21_re; \ fc_re = \ ma##10_re * mb##02_re - ma##10_im * mb##02_im + \ ma##11_re * mb##12_re - ma##11_im * mb##12_im + \ ma##12_re * mb##22_re - ma##12_im * mb##22_im; \ fc_im = \ ma##10_re * mb##02_im + ma##10_im * mb##02_re + \ ma##11_re * mb##12_im + ma##11_im * mb##12_re + \ ma##12_re * mb##22_im + ma##12_im * mb##22_re; \ ma##10_re = fa_re; \ ma##10_im = fa_im; \ ma##11_re = fb_re; \ ma##11_im = fb_im; \ ma##12_re = fc_re; \ ma##12_im = fc_im; \ fa_re = \ ma##20_re * mb##00_re - ma##20_im * mb##00_im + \ ma##21_re * mb##10_re - ma##21_im * mb##10_im + \ ma##22_re * mb##20_re - ma##22_im * mb##20_im; \ fa_im = \ ma##20_re * mb##00_im + ma##20_im * mb##00_re + \ ma##21_re * mb##10_im + ma##21_im * mb##10_re + \ ma##22_re * mb##20_im + ma##22_im * mb##20_re; \ fb_re = \ ma##20_re * mb##01_re - ma##20_im * mb##01_im + \ ma##21_re * mb##11_re - ma##21_im * mb##11_im + \ ma##22_re * mb##21_re - ma##22_im * mb##21_im; \ fb_im = \ ma##20_re * mb##01_im + ma##20_im * mb##01_re + \ ma##21_re * mb##11_im + ma##21_im * mb##11_re + \ ma##22_re * mb##21_im + ma##22_im * mb##21_re; \ fc_re = \ ma##20_re * mb##02_re - ma##20_im * mb##02_im + \ ma##21_re * mb##12_re - ma##21_im * mb##12_im + \ ma##22_re * mb##22_re - ma##22_im * mb##22_im; \ fc_im = \ ma##20_re * mb##02_im + ma##20_im * mb##02_re + \ ma##21_re * mb##12_im + ma##21_im * mb##12_re + \ ma##22_re * mb##22_im + ma##22_im * mb##22_re; \ ma##20_re = fa_re; \ ma##20_im = fa_im; \ ma##21_re = fb_re; \ ma##21_im = fb_im; \ ma##22_re = fc_re; \ ma##22_im = fc_im; \ }while(0) #define MULT_SU3_NA_TEST(ma, mb) do{ \ float 
fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \ fa_re = \ ma##00_re * mb##T00_re - ma##00_im * mb##T00_im + \ ma##01_re * mb##T10_re - ma##01_im * mb##T10_im + \ ma##02_re * mb##T20_re - ma##02_im * mb##T20_im; \ fa_im = \ ma##00_re * mb##T00_im + ma##00_im * mb##T00_re + \ ma##01_re * mb##T10_im + ma##01_im * mb##T10_re + \ ma##02_re * mb##T20_im + ma##02_im * mb##T20_re; \ fb_re = \ ma##00_re * mb##T01_re - ma##00_im * mb##T01_im + \ ma##01_re * mb##T11_re - ma##01_im * mb##T11_im + \ ma##02_re * mb##T21_re - ma##02_im * mb##T21_im; \ fb_im = \ ma##00_re * mb##T01_im + ma##00_im * mb##T01_re + \ ma##01_re * mb##T11_im + ma##01_im * mb##T11_re + \ ma##02_re * mb##T21_im + ma##02_im * mb##T21_re; \ fc_re = \ ma##00_re * mb##T02_re - ma##00_im * mb##T02_im + \ ma##01_re * mb##T12_re - ma##01_im * mb##T12_im + \ ma##02_re * mb##T22_re - ma##02_im * mb##T22_im; \ fc_im = \ ma##00_re * mb##T02_im + ma##00_im * mb##T02_re + \ ma##01_re * mb##T12_im + ma##01_im * mb##T12_re + \ ma##02_re * mb##T22_im + ma##02_im * mb##T22_re; \ ma##00_re = fa_re; \ ma##00_im = fa_im; \ ma##01_re = fb_re; \ ma##01_im = fb_im; \ ma##02_re = fc_re; \ ma##02_im = fc_im; \ fa_re = \ ma##10_re * mb##T00_re - ma##10_im * mb##T00_im + \ ma##11_re * mb##T10_re - ma##11_im * mb##T10_im + \ ma##12_re * mb##T20_re - ma##12_im * mb##T20_im; \ fa_im = \ ma##10_re * mb##T00_im + ma##10_im * mb##T00_re + \ ma##11_re * mb##T10_im + ma##11_im * mb##T10_re + \ ma##12_re * mb##T20_im + ma##12_im * mb##T20_re; \ fb_re = \ ma##10_re * mb##T01_re - ma##10_im * mb##T01_im + \ ma##11_re * mb##T11_re - ma##11_im * mb##T11_im + \ ma##12_re * mb##T21_re - ma##12_im * mb##T21_im; \ fb_im = \ ma##10_re * mb##T01_im + ma##10_im * mb##T01_re + \ ma##11_re * mb##T11_im + ma##11_im * mb##T11_re + \ ma##12_re * mb##T21_im + ma##12_im * mb##T21_re; \ fc_re = \ ma##10_re * mb##T02_re - ma##10_im * mb##T02_im + \ ma##11_re * mb##T12_re - ma##11_im * mb##T12_im + \ ma##12_re * mb##T22_re - ma##12_im * mb##T22_im; \ fc_im = \ ma##10_re * mb##T02_im + ma##10_im * mb##T02_re + \ ma##11_re * mb##T12_im + ma##11_im * mb##T12_re + \ ma##12_re * mb##T22_im + ma##12_im * mb##T22_re; \ ma##10_re = fa_re; \ ma##10_im = fa_im; \ ma##11_re = fb_re; \ ma##11_im = fb_im; \ ma##12_re = fc_re; \ ma##12_im = fc_im; \ fa_re = \ ma##20_re * mb##T00_re - ma##20_im * mb##T00_im + \ ma##21_re * mb##T10_re - ma##21_im * mb##T10_im + \ ma##22_re * mb##T20_re - ma##22_im * mb##T20_im; \ fa_im = \ ma##20_re * mb##T00_im + ma##20_im * mb##T00_re + \ ma##21_re * mb##T10_im + ma##21_im * mb##T10_re + \ ma##22_re * mb##T20_im + ma##22_im * mb##T20_re; \ fb_re = \ ma##20_re * mb##T01_re - ma##20_im * mb##T01_im + \ ma##21_re * mb##T11_re - ma##21_im * mb##T11_im + \ ma##22_re * mb##T21_re - ma##22_im * mb##T21_im; \ fb_im = \ ma##20_re * mb##T01_im + ma##20_im * mb##T01_re + \ ma##21_re * mb##T11_im + ma##21_im * mb##T11_re + \ ma##22_re * mb##T21_im + ma##22_im * mb##T21_re; \ fc_re = \ ma##20_re * mb##T02_re - ma##20_im * mb##T02_im + \ ma##21_re * mb##T12_re - ma##21_im * mb##T12_im + \ ma##22_re * mb##T22_re - ma##22_im * mb##T22_im; \ fc_im = \ ma##20_re * mb##T02_im + ma##20_im * mb##T02_re + \ ma##21_re * mb##T12_im + ma##21_im * mb##T12_re + \ ma##22_re * mb##T22_im + ma##22_im * mb##T22_re; \ ma##20_re = fa_re; \ ma##20_im = fa_im; \ ma##21_re = fb_re; \ ma##21_im = fb_im; \ ma##22_re = fc_re; \ ma##22_im = fc_im; \ }while(0) #define MULT_SU3_AN_TEST(ma, mb) do{ \ float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \ fa_re = \ ma##T00_re * mb##00_re - ma##T00_im * mb##00_im + \ 
ma##T01_re * mb##10_re - ma##T01_im * mb##10_im + \ ma##T02_re * mb##20_re - ma##T02_im * mb##20_im; \ fa_im = \ ma##T00_re * mb##00_im + ma##T00_im * mb##00_re + \ ma##T01_re * mb##10_im + ma##T01_im * mb##10_re + \ ma##T02_re * mb##20_im + ma##T02_im * mb##20_re; \ fb_re = \ ma##T10_re * mb##00_re - ma##T10_im * mb##00_im + \ ma##T11_re * mb##10_re - ma##T11_im * mb##10_im + \ ma##T12_re * mb##20_re - ma##T12_im * mb##20_im; \ fb_im = \ ma##T10_re * mb##00_im + ma##T10_im * mb##00_re + \ ma##T11_re * mb##10_im + ma##T11_im * mb##10_re + \ ma##T12_re * mb##20_im + ma##T12_im * mb##20_re; \ fc_re = \ ma##T20_re * mb##00_re - ma##T20_im * mb##00_im + \ ma##T21_re * mb##10_re - ma##T21_im * mb##10_im + \ ma##T22_re * mb##20_re - ma##T22_im * mb##20_im; \ fc_im = \ ma##T20_re * mb##00_im + ma##T20_im * mb##00_re + \ ma##T21_re * mb##10_im + ma##T21_im * mb##10_re + \ ma##T22_re * mb##20_im + ma##T22_im * mb##20_re; \ mb##00_re = fa_re; \ mb##00_im = fa_im; \ mb##10_re = fb_re; \ mb##10_im = fb_im; \ mb##20_re = fc_re; \ mb##20_im = fc_im; \ fa_re = \ ma##T00_re * mb##01_re - ma##T00_im * mb##01_im + \ ma##T01_re * mb##11_re - ma##T01_im * mb##11_im + \ ma##T02_re * mb##21_re - ma##T02_im * mb##21_im; \ fa_im = \ ma##T00_re * mb##01_im + ma##T00_im * mb##01_re + \ ma##T01_re * mb##11_im + ma##T01_im * mb##11_re + \ ma##T02_re * mb##21_im + ma##T02_im * mb##21_re; \ fb_re = \ ma##T10_re * mb##01_re - ma##T10_im * mb##01_im + \ ma##T11_re * mb##11_re - ma##T11_im * mb##11_im + \ ma##T12_re * mb##21_re - ma##T12_im * mb##21_im; \ fb_im = \ ma##T10_re * mb##01_im + ma##T10_im * mb##01_re + \ ma##T11_re * mb##11_im + ma##T11_im * mb##11_re + \ ma##T12_re * mb##21_im + ma##T12_im * mb##21_re; \ fc_re = \ ma##T20_re * mb##01_re - ma##T20_im * mb##01_im + \ ma##T21_re * mb##11_re - ma##T21_im * mb##11_im + \ ma##T22_re * mb##21_re - ma##T22_im * mb##21_im; \ fc_im = \ ma##T20_re * mb##01_im + ma##T20_im * mb##01_re + \ ma##T21_re * mb##11_im + ma##T21_im * mb##11_re + \ ma##T22_re * mb##21_im + ma##T22_im * mb##21_re; \ mb##01_re = fa_re; \ mb##01_im = fa_im; \ mb##11_re = fb_re; \ mb##11_im = fb_im; \ mb##21_re = fc_re; \ mb##21_im = fc_im; \ fa_re = \ ma##T00_re * mb##02_re - ma##T00_im * mb##02_im + \ ma##T01_re * mb##12_re - ma##T01_im * mb##12_im + \ ma##T02_re * mb##22_re - ma##T02_im * mb##22_im; \ fa_im = \ ma##T00_re * mb##02_im + ma##T00_im * mb##02_re + \ ma##T01_re * mb##12_im + ma##T01_im * mb##12_re + \ ma##T02_re * mb##22_im + ma##T02_im * mb##22_re; \ fb_re = \ ma##T10_re * mb##02_re - ma##T10_im * mb##02_im + \ ma##T11_re * mb##12_re - ma##T11_im * mb##12_im + \ ma##T12_re * mb##22_re - ma##T12_im * mb##22_im; \ fb_im = \ ma##T10_re * mb##02_im + ma##T10_im * mb##02_re + \ ma##T11_re * mb##12_im + ma##T11_im * mb##12_re + \ ma##T12_re * mb##22_im + ma##T12_im * mb##22_re; \ fc_re = \ ma##T20_re * mb##02_re - ma##T20_im * mb##02_im + \ ma##T21_re * mb##12_re - ma##T21_im * mb##12_im + \ ma##T22_re * mb##22_re - ma##T22_im * mb##22_im; \ fc_im = \ ma##T20_re * mb##02_im + ma##T20_im * mb##02_re + \ ma##T21_re * mb##12_im + ma##T21_im * mb##12_re + \ ma##T22_re * mb##22_im + ma##T22_im * mb##22_re; \ mb##02_re = fa_re; \ mb##02_im = fa_im; \ mb##12_re = fb_re; \ mb##12_im = fb_im; \ mb##22_re = fc_re; \ mb##22_im = fc_im; \ }while(0) #define GF_SITE_MATRIX_LOAD_TEX 1 #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle_recon, dir, idx, var) #define LOAD_ODD_MATRIX(dir, idx, var) 
LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle_recon, dir, idx, var) #else #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var) #endif #define LOAD_MATRIX LOAD_MATRIX_12_SINGLE #define LOAD_ANTI_HERMITIAN LOAD_ANTI_HERMITIAN_SINGLE #define WRITE_ANTI_HERMITIAN WRITE_ANTI_HERMITIAN_SINGLE #define RECONSTRUCT_MATRIX RECONSTRUCT_LINK_12 __constant__ int path_max_length; void gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length) { #ifdef MULTI_GPU #error "multi gpu is not supported for gauge force computation" #endif static int gauge_force_init_cuda_flag = 0; if (gauge_force_init_cuda_flag){ return; } gauge_force_init_cuda_flag=1; init_kernel_cuda(param); hipMemcpyToSymbol("path_max_length", &path_max_length, sizeof(int)); } #define COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx) do { \ switch(mydir){ \ case 0: \ new_mem_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \ new_x1 = (new_x1==X1m1)?0:new_x1+1; \ break; \ case 1: \ new_mem_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \ new_x2 = (new_x2==X2m1)?0:new_x2+1; \ break; \ case 2: \ new_mem_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \ new_x3 = (new_x3==X3m1)?0:new_x3+1; \ break; \ case 3: \ new_mem_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \ new_x4 = (new_x4==X4m1)?0:new_x4+1; \ break; \ } \ }while(0) #define COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx) do { \ switch(mydir){ \ case 0: \ new_mem_idx = ( (new_x1==0)?idx+X1m1:idx-1); \ new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \ break; \ case 1: \ new_mem_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \ new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \ break; \ case 2: \ new_mem_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \ new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \ break; \ case 3: \ new_mem_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \ new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \ break; \ } \ }while(0) #define GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \ sign =1; \ switch(dir){ \ case XUP: \ if ( (i4 & 1) == 1){ \ sign = 1; \ } \ break; \ case YUP: \ if ( ((i4+i1) & 1) == 1){ \ sign = 1; \ } \ break; \ case ZUP: \ if ( ((i4+i1+i2) & 1) == 1){ \ sign = 1; \ } \ break; \ case TUP: \ if (i4 == X4m1 ){ \ sign = 1; \ } \ break; \ } \ }while (0) //for now we only consider 12-reconstruct and single precision template<int oddBit> __global__ void parity_compute_gauge_force_kernel(float2* momEven, float2* momOdd, int dir, double eb3, float4* linkEven, float4* linkOdd, int* input_path, int* length, float* path_coeff, int num_paths) { int i,j=0; int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int sign = 1; float2* mymom=momEven; if (oddBit){ mymom = momOdd; } float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; float2 STAPLE0, STAPLE1, STAPLE2, STAPLE3,STAPLE4, STAPLE5, STAPLE6, STAPLE7, STAPLE8; float2 AH0, AH1, AH2, AH3, AH4; int new_mem_idx; SET_SU3_MATRIX(staple, 0); for(i=0;i < num_paths; i++){ int nbr_oddbit = (oddBit^1 ); int new_x1 =x1; int new_x2 =x2; int new_x3 =x3; int new_x4 =x4; COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(dir, X); //linka: current matrix //linkb: the loaded matrix in this round SET_UNIT_SU3_MATRIX(linka); int* path = input_path + i*path_max_length; int lnkdir; int path0 = path[0]; if 
(GOES_FORWARDS(path0)){ lnkdir=path0; }else{ lnkdir=OPP_DIR(path0); COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(path0), new_mem_idx); nbr_oddbit = nbr_oddbit^1; } int nbr_idx = new_mem_idx >>1; if (nbr_oddbit){ LOAD_ODD_MATRIX( lnkdir, nbr_idx, LINKB); }else{ LOAD_EVEN_MATRIX( lnkdir, nbr_idx, LINKB); } GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4); RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb); if (GOES_FORWARDS(path0)){ COPY_SU3_MATRIX(linkb, linka); COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(path0, new_mem_idx); nbr_oddbit = nbr_oddbit^1; }else{ SU3_ADJOINT(linkb, linka); } for(j=1; j < length[i]; j++){ int lnkdir; int pathj = path[j]; if (GOES_FORWARDS(pathj)){ lnkdir=pathj; }else{ lnkdir=OPP_DIR(pathj); COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(pathj), new_mem_idx); nbr_oddbit = nbr_oddbit^1; } int nbr_idx = new_mem_idx >>1; if (nbr_oddbit){ LOAD_ODD_MATRIX(lnkdir, nbr_idx, LINKB); }else{ LOAD_EVEN_MATRIX(lnkdir, nbr_idx, LINKB); } GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4); RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb); if (GOES_FORWARDS(pathj)){ MULT_SU3_NN_TEST(linka, linkb); COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(pathj, new_mem_idx); nbr_oddbit = nbr_oddbit^1; }else{ MULT_SU3_NA_TEST(linka, linkb); } }//j SCALAR_MULT_ADD_SU3_MATRIX(staple, linka, path_coeff[i], staple); }//i //update mom if (oddBit){ LOAD_ODD_MATRIX(dir, sid, LINKA); }else{ LOAD_EVEN_MATRIX(dir, sid, LINKA); } GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, x1, x2, x3, x4); RECONSTRUCT_MATRIX(dir, sid, sign, linka); MULT_SU3_NN_TEST(linka, staple); LOAD_ANTI_HERMITIAN(mymom, dir, sid, AH); UNCOMPRESS_ANTI_HERMITIAN(ah, linkb); SCALAR_MULT_SUB_SU3_MATRIX(linkb, linka, eb3, linka); MAKE_ANTI_HERMITIAN(linka, ah); WRITE_ANTI_HERMITIAN(mymom, dir, sid, AH); return; } void gauge_force_cuda(FullMom cudaMom, int dir, double eb3, FullGauge cudaSiteLink, QudaGaugeParam* param, int** input_path, int* length, void* path_coeff, int num_paths, int max_length) { int i, j; //input_path int bytes = num_paths*max_length* sizeof(int); int* input_path_d; hipMalloc((void**)&input_path_d, bytes); checkCudaError(); hipMemset(input_path_d, 0, bytes);checkCudaError(); int* input_path_h = (int*)malloc(bytes); if (input_path_h == NULL){ printf("ERROR: malloc failed for input_path_h in function %s\n", __FUNCTION__); exit(1); } memset(input_path_h, 0, bytes); for(i=0;i < num_paths;i++){ for(j=0; j < length[i]; j++){ input_path_h[i*max_length + j] =input_path[i][j]; } } hipMemcpy(input_path_d, input_path_h, bytes, hipMemcpyHostToDevice); checkCudaError(); //length int* length_d; hipMalloc((void**)&length_d, num_paths*sizeof(int)); checkCudaError(); hipMemcpy(length_d, length, num_paths*sizeof(int), hipMemcpyHostToDevice); checkCudaError(); //path_coeff int gsize; if (param->cuda_prec == QUDA_DOUBLE_PRECISION){ gsize = sizeof(double); }else{ gsize= sizeof(float); } void* path_coeff_d; hipMalloc((void**)&path_coeff_d, num_paths*gsize); checkCudaError(); hipMemcpy(path_coeff_d, path_coeff, num_paths*gsize, hipMemcpyHostToDevice); checkCudaError(); //compute the gauge forces int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; dim3 blockDim(BLOCK_DIM, 1,1); dim3 gridDim(volume/blockDim.x, 1, 1); dim3 halfGridDim(volume/(2*blockDim.x), 1, 1); float2* momEven = (float2*)cudaMom.even; float2* momOdd = (float2*)cudaMom.odd; float4* linkEven = (float4*)cudaSiteLink.even; float4* linkOdd = (float4*)cudaSiteLink.odd; hipBindTexture(0, siteLink0TexSingle_recon, cudaSiteLink.even, cudaSiteLink.bytes); 
hipBindTexture(0, siteLink1TexSingle_recon, cudaSiteLink.odd, cudaSiteLink.bytes); hipLaunchKernelGGL(( parity_compute_gauge_force_kernel<0>), dim3(halfGridDim), dim3(blockDim), 0, 0, momEven, momOdd, dir, eb3, linkEven, linkOdd, input_path_d, length_d, (float*)path_coeff_d, num_paths); //odd
/* The reason we do not switch the even/odd function input parameters and the texture binding * is that we use the oddbit to decide where to load, in the kernel function */ hipLaunchKernelGGL(( parity_compute_gauge_force_kernel<1>), dim3(halfGridDim), dim3(blockDim), 0, 0, momEven, momOdd, dir, eb3, linkEven, linkOdd, input_path_d, length_d, (float*)path_coeff_d, num_paths); hipUnbindTexture(siteLink0TexSingle_recon); hipUnbindTexture(siteLink1TexSingle_recon); checkCudaError(); hipFree(input_path_d); checkCudaError(); free(input_path_h); hipFree(length_d); hipFree(path_coeff_d); }
#undef LOAD_EVEN_MATRIX
#undef LOAD_ODD_MATRIX
#undef LOAD_MATRIX
#undef LOAD_ANTI_HERMITIAN
#undef WRITE_ANTI_HERMITIAN
#undef RECONSTRUCT_MATRIX
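Before the kernels above run, gauge_force_cuda packs the ragged input_path lists into a single fixed-stride device buffer (stride max_length, zero padded) plus a per-path length array; that is what lets the kernel address path i as input_path + i*path_max_length. A minimal sketch of just that host-side packing step, with hypothetical names and plain cudaMalloc/cudaMemcpy in place of the project's helpers:

#include <cuda_runtime.h>
#include <vector>

// Pack num_paths variable-length index lists into one num_paths*max_length
// buffer on the device, plus the per-path lengths, mirroring the layout
// gauge_force_cuda builds in input_path_h / input_path_d above.
void upload_paths(int** paths, const int* lengths, int num_paths, int max_length,
                  int** d_paths_out, int** d_lengths_out)
{
    std::vector<int> packed(static_cast<size_t>(num_paths) * max_length, 0);
    for (int i = 0; i < num_paths; ++i)
        for (int j = 0; j < lengths[i]; ++j)
            packed[i * max_length + j] = paths[i][j];  // fixed stride, zero padded

    cudaMalloc((void**)d_paths_out, packed.size() * sizeof(int));
    cudaMemcpy(*d_paths_out, packed.data(), packed.size() * sizeof(int),
               cudaMemcpyHostToDevice);

    cudaMalloc((void**)d_lengths_out, num_paths * sizeof(int));
    cudaMemcpy(*d_lengths_out, lengths, num_paths * sizeof(int),
               cudaMemcpyHostToDevice);
}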
b7a18e3448497384975b06d2bef27b7be02047c9.cu
#include <read_gauge.h> #include <gauge_quda.h> #include "gauge_force_quda.h" #define MULT_SU3_NN_TEST(ma, mb) do{ \ float fa_re,fa_im, fb_re, fb_im, fc_re, fc_im; \ fa_re = \ ma##00_re * mb##00_re - ma##00_im * mb##00_im + \ ma##01_re * mb##10_re - ma##01_im * mb##10_im + \ ma##02_re * mb##20_re - ma##02_im * mb##20_im; \ fa_im = \ ma##00_re * mb##00_im + ma##00_im * mb##00_re + \ ma##01_re * mb##10_im + ma##01_im * mb##10_re + \ ma##02_re * mb##20_im + ma##02_im * mb##20_re; \ fb_re = \ ma##00_re * mb##01_re - ma##00_im * mb##01_im + \ ma##01_re * mb##11_re - ma##01_im * mb##11_im + \ ma##02_re * mb##21_re - ma##02_im * mb##21_im; \ fb_im = \ ma##00_re * mb##01_im + ma##00_im * mb##01_re + \ ma##01_re * mb##11_im + ma##01_im * mb##11_re + \ ma##02_re * mb##21_im + ma##02_im * mb##21_re; \ fc_re = \ ma##00_re * mb##02_re - ma##00_im * mb##02_im + \ ma##01_re * mb##12_re - ma##01_im * mb##12_im + \ ma##02_re * mb##22_re - ma##02_im * mb##22_im; \ fc_im = \ ma##00_re * mb##02_im + ma##00_im * mb##02_re + \ ma##01_re * mb##12_im + ma##01_im * mb##12_re + \ ma##02_re * mb##22_im + ma##02_im * mb##22_re; \ ma##00_re = fa_re; \ ma##00_im = fa_im; \ ma##01_re = fb_re; \ ma##01_im = fb_im; \ ma##02_re = fc_re; \ ma##02_im = fc_im; \ fa_re = \ ma##10_re * mb##00_re - ma##10_im * mb##00_im + \ ma##11_re * mb##10_re - ma##11_im * mb##10_im + \ ma##12_re * mb##20_re - ma##12_im * mb##20_im; \ fa_im = \ ma##10_re * mb##00_im + ma##10_im * mb##00_re + \ ma##11_re * mb##10_im + ma##11_im * mb##10_re + \ ma##12_re * mb##20_im + ma##12_im * mb##20_re; \ fb_re = \ ma##10_re * mb##01_re - ma##10_im * mb##01_im + \ ma##11_re * mb##11_re - ma##11_im * mb##11_im + \ ma##12_re * mb##21_re - ma##12_im * mb##21_im; \ fb_im = \ ma##10_re * mb##01_im + ma##10_im * mb##01_re + \ ma##11_re * mb##11_im + ma##11_im * mb##11_re + \ ma##12_re * mb##21_im + ma##12_im * mb##21_re; \ fc_re = \ ma##10_re * mb##02_re - ma##10_im * mb##02_im + \ ma##11_re * mb##12_re - ma##11_im * mb##12_im + \ ma##12_re * mb##22_re - ma##12_im * mb##22_im; \ fc_im = \ ma##10_re * mb##02_im + ma##10_im * mb##02_re + \ ma##11_re * mb##12_im + ma##11_im * mb##12_re + \ ma##12_re * mb##22_im + ma##12_im * mb##22_re; \ ma##10_re = fa_re; \ ma##10_im = fa_im; \ ma##11_re = fb_re; \ ma##11_im = fb_im; \ ma##12_re = fc_re; \ ma##12_im = fc_im; \ fa_re = \ ma##20_re * mb##00_re - ma##20_im * mb##00_im + \ ma##21_re * mb##10_re - ma##21_im * mb##10_im + \ ma##22_re * mb##20_re - ma##22_im * mb##20_im; \ fa_im = \ ma##20_re * mb##00_im + ma##20_im * mb##00_re + \ ma##21_re * mb##10_im + ma##21_im * mb##10_re + \ ma##22_re * mb##20_im + ma##22_im * mb##20_re; \ fb_re = \ ma##20_re * mb##01_re - ma##20_im * mb##01_im + \ ma##21_re * mb##11_re - ma##21_im * mb##11_im + \ ma##22_re * mb##21_re - ma##22_im * mb##21_im; \ fb_im = \ ma##20_re * mb##01_im + ma##20_im * mb##01_re + \ ma##21_re * mb##11_im + ma##21_im * mb##11_re + \ ma##22_re * mb##21_im + ma##22_im * mb##21_re; \ fc_re = \ ma##20_re * mb##02_re - ma##20_im * mb##02_im + \ ma##21_re * mb##12_re - ma##21_im * mb##12_im + \ ma##22_re * mb##22_re - ma##22_im * mb##22_im; \ fc_im = \ ma##20_re * mb##02_im + ma##20_im * mb##02_re + \ ma##21_re * mb##12_im + ma##21_im * mb##12_re + \ ma##22_re * mb##22_im + ma##22_im * mb##22_re; \ ma##20_re = fa_re; \ ma##20_im = fa_im; \ ma##21_re = fb_re; \ ma##21_im = fb_im; \ ma##22_re = fc_re; \ ma##22_im = fc_im; \ }while(0) #define MULT_SU3_NA_TEST(ma, mb) do{ \ float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \ fa_re = \ ma##00_re * mb##T00_re - ma##00_im * 
mb##T00_im + \ ma##01_re * mb##T10_re - ma##01_im * mb##T10_im + \ ma##02_re * mb##T20_re - ma##02_im * mb##T20_im; \ fa_im = \ ma##00_re * mb##T00_im + ma##00_im * mb##T00_re + \ ma##01_re * mb##T10_im + ma##01_im * mb##T10_re + \ ma##02_re * mb##T20_im + ma##02_im * mb##T20_re; \ fb_re = \ ma##00_re * mb##T01_re - ma##00_im * mb##T01_im + \ ma##01_re * mb##T11_re - ma##01_im * mb##T11_im + \ ma##02_re * mb##T21_re - ma##02_im * mb##T21_im; \ fb_im = \ ma##00_re * mb##T01_im + ma##00_im * mb##T01_re + \ ma##01_re * mb##T11_im + ma##01_im * mb##T11_re + \ ma##02_re * mb##T21_im + ma##02_im * mb##T21_re; \ fc_re = \ ma##00_re * mb##T02_re - ma##00_im * mb##T02_im + \ ma##01_re * mb##T12_re - ma##01_im * mb##T12_im + \ ma##02_re * mb##T22_re - ma##02_im * mb##T22_im; \ fc_im = \ ma##00_re * mb##T02_im + ma##00_im * mb##T02_re + \ ma##01_re * mb##T12_im + ma##01_im * mb##T12_re + \ ma##02_re * mb##T22_im + ma##02_im * mb##T22_re; \ ma##00_re = fa_re; \ ma##00_im = fa_im; \ ma##01_re = fb_re; \ ma##01_im = fb_im; \ ma##02_re = fc_re; \ ma##02_im = fc_im; \ fa_re = \ ma##10_re * mb##T00_re - ma##10_im * mb##T00_im + \ ma##11_re * mb##T10_re - ma##11_im * mb##T10_im + \ ma##12_re * mb##T20_re - ma##12_im * mb##T20_im; \ fa_im = \ ma##10_re * mb##T00_im + ma##10_im * mb##T00_re + \ ma##11_re * mb##T10_im + ma##11_im * mb##T10_re + \ ma##12_re * mb##T20_im + ma##12_im * mb##T20_re; \ fb_re = \ ma##10_re * mb##T01_re - ma##10_im * mb##T01_im + \ ma##11_re * mb##T11_re - ma##11_im * mb##T11_im + \ ma##12_re * mb##T21_re - ma##12_im * mb##T21_im; \ fb_im = \ ma##10_re * mb##T01_im + ma##10_im * mb##T01_re + \ ma##11_re * mb##T11_im + ma##11_im * mb##T11_re + \ ma##12_re * mb##T21_im + ma##12_im * mb##T21_re; \ fc_re = \ ma##10_re * mb##T02_re - ma##10_im * mb##T02_im + \ ma##11_re * mb##T12_re - ma##11_im * mb##T12_im + \ ma##12_re * mb##T22_re - ma##12_im * mb##T22_im; \ fc_im = \ ma##10_re * mb##T02_im + ma##10_im * mb##T02_re + \ ma##11_re * mb##T12_im + ma##11_im * mb##T12_re + \ ma##12_re * mb##T22_im + ma##12_im * mb##T22_re; \ ma##10_re = fa_re; \ ma##10_im = fa_im; \ ma##11_re = fb_re; \ ma##11_im = fb_im; \ ma##12_re = fc_re; \ ma##12_im = fc_im; \ fa_re = \ ma##20_re * mb##T00_re - ma##20_im * mb##T00_im + \ ma##21_re * mb##T10_re - ma##21_im * mb##T10_im + \ ma##22_re * mb##T20_re - ma##22_im * mb##T20_im; \ fa_im = \ ma##20_re * mb##T00_im + ma##20_im * mb##T00_re + \ ma##21_re * mb##T10_im + ma##21_im * mb##T10_re + \ ma##22_re * mb##T20_im + ma##22_im * mb##T20_re; \ fb_re = \ ma##20_re * mb##T01_re - ma##20_im * mb##T01_im + \ ma##21_re * mb##T11_re - ma##21_im * mb##T11_im + \ ma##22_re * mb##T21_re - ma##22_im * mb##T21_im; \ fb_im = \ ma##20_re * mb##T01_im + ma##20_im * mb##T01_re + \ ma##21_re * mb##T11_im + ma##21_im * mb##T11_re + \ ma##22_re * mb##T21_im + ma##22_im * mb##T21_re; \ fc_re = \ ma##20_re * mb##T02_re - ma##20_im * mb##T02_im + \ ma##21_re * mb##T12_re - ma##21_im * mb##T12_im + \ ma##22_re * mb##T22_re - ma##22_im * mb##T22_im; \ fc_im = \ ma##20_re * mb##T02_im + ma##20_im * mb##T02_re + \ ma##21_re * mb##T12_im + ma##21_im * mb##T12_re + \ ma##22_re * mb##T22_im + ma##22_im * mb##T22_re; \ ma##20_re = fa_re; \ ma##20_im = fa_im; \ ma##21_re = fb_re; \ ma##21_im = fb_im; \ ma##22_re = fc_re; \ ma##22_im = fc_im; \ }while(0) #define MULT_SU3_AN_TEST(ma, mb) do{ \ float fa_re, fa_im, fb_re, fb_im, fc_re, fc_im; \ fa_re = \ ma##T00_re * mb##00_re - ma##T00_im * mb##00_im + \ ma##T01_re * mb##10_re - ma##T01_im * mb##10_im + \ ma##T02_re * mb##20_re - ma##T02_im * 
mb##20_im; \ fa_im = \ ma##T00_re * mb##00_im + ma##T00_im * mb##00_re + \ ma##T01_re * mb##10_im + ma##T01_im * mb##10_re + \ ma##T02_re * mb##20_im + ma##T02_im * mb##20_re; \ fb_re = \ ma##T10_re * mb##00_re - ma##T10_im * mb##00_im + \ ma##T11_re * mb##10_re - ma##T11_im * mb##10_im + \ ma##T12_re * mb##20_re - ma##T12_im * mb##20_im; \ fb_im = \ ma##T10_re * mb##00_im + ma##T10_im * mb##00_re + \ ma##T11_re * mb##10_im + ma##T11_im * mb##10_re + \ ma##T12_re * mb##20_im + ma##T12_im * mb##20_re; \ fc_re = \ ma##T20_re * mb##00_re - ma##T20_im * mb##00_im + \ ma##T21_re * mb##10_re - ma##T21_im * mb##10_im + \ ma##T22_re * mb##20_re - ma##T22_im * mb##20_im; \ fc_im = \ ma##T20_re * mb##00_im + ma##T20_im * mb##00_re + \ ma##T21_re * mb##10_im + ma##T21_im * mb##10_re + \ ma##T22_re * mb##20_im + ma##T22_im * mb##20_re; \ mb##00_re = fa_re; \ mb##00_im = fa_im; \ mb##10_re = fb_re; \ mb##10_im = fb_im; \ mb##20_re = fc_re; \ mb##20_im = fc_im; \ fa_re = \ ma##T00_re * mb##01_re - ma##T00_im * mb##01_im + \ ma##T01_re * mb##11_re - ma##T01_im * mb##11_im + \ ma##T02_re * mb##21_re - ma##T02_im * mb##21_im; \ fa_im = \ ma##T00_re * mb##01_im + ma##T00_im * mb##01_re + \ ma##T01_re * mb##11_im + ma##T01_im * mb##11_re + \ ma##T02_re * mb##21_im + ma##T02_im * mb##21_re; \ fb_re = \ ma##T10_re * mb##01_re - ma##T10_im * mb##01_im + \ ma##T11_re * mb##11_re - ma##T11_im * mb##11_im + \ ma##T12_re * mb##21_re - ma##T12_im * mb##21_im; \ fb_im = \ ma##T10_re * mb##01_im + ma##T10_im * mb##01_re + \ ma##T11_re * mb##11_im + ma##T11_im * mb##11_re + \ ma##T12_re * mb##21_im + ma##T12_im * mb##21_re; \ fc_re = \ ma##T20_re * mb##01_re - ma##T20_im * mb##01_im + \ ma##T21_re * mb##11_re - ma##T21_im * mb##11_im + \ ma##T22_re * mb##21_re - ma##T22_im * mb##21_im; \ fc_im = \ ma##T20_re * mb##01_im + ma##T20_im * mb##01_re + \ ma##T21_re * mb##11_im + ma##T21_im * mb##11_re + \ ma##T22_re * mb##21_im + ma##T22_im * mb##21_re; \ mb##01_re = fa_re; \ mb##01_im = fa_im; \ mb##11_re = fb_re; \ mb##11_im = fb_im; \ mb##21_re = fc_re; \ mb##21_im = fc_im; \ fa_re = \ ma##T00_re * mb##02_re - ma##T00_im * mb##02_im + \ ma##T01_re * mb##12_re - ma##T01_im * mb##12_im + \ ma##T02_re * mb##22_re - ma##T02_im * mb##22_im; \ fa_im = \ ma##T00_re * mb##02_im + ma##T00_im * mb##02_re + \ ma##T01_re * mb##12_im + ma##T01_im * mb##12_re + \ ma##T02_re * mb##22_im + ma##T02_im * mb##22_re; \ fb_re = \ ma##T10_re * mb##02_re - ma##T10_im * mb##02_im + \ ma##T11_re * mb##12_re - ma##T11_im * mb##12_im + \ ma##T12_re * mb##22_re - ma##T12_im * mb##22_im; \ fb_im = \ ma##T10_re * mb##02_im + ma##T10_im * mb##02_re + \ ma##T11_re * mb##12_im + ma##T11_im * mb##12_re + \ ma##T12_re * mb##22_im + ma##T12_im * mb##22_re; \ fc_re = \ ma##T20_re * mb##02_re - ma##T20_im * mb##02_im + \ ma##T21_re * mb##12_re - ma##T21_im * mb##12_im + \ ma##T22_re * mb##22_re - ma##T22_im * mb##22_im; \ fc_im = \ ma##T20_re * mb##02_im + ma##T20_im * mb##02_re + \ ma##T21_re * mb##12_im + ma##T21_im * mb##12_re + \ ma##T22_re * mb##22_im + ma##T22_im * mb##22_re; \ mb##02_re = fa_re; \ mb##02_im = fa_im; \ mb##12_re = fb_re; \ mb##12_im = fb_im; \ mb##22_re = fc_re; \ mb##22_im = fc_im; \ }while(0) #define GF_SITE_MATRIX_LOAD_TEX 1 #if (GF_SITE_MATRIX_LOAD_TEX == 1) #define LOAD_EVEN_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink0TexSingle_recon, dir, idx, var) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE_TEX(siteLink1TexSingle_recon, dir, idx, var) #else #define LOAD_EVEN_MATRIX(dir, idx, var) 
LOAD_MATRIX_12_SINGLE(linkEven, dir, idx, var) #define LOAD_ODD_MATRIX(dir, idx, var) LOAD_MATRIX_12_SINGLE(linkOdd, dir, idx, var) #endif #define LOAD_MATRIX LOAD_MATRIX_12_SINGLE #define LOAD_ANTI_HERMITIAN LOAD_ANTI_HERMITIAN_SINGLE #define WRITE_ANTI_HERMITIAN WRITE_ANTI_HERMITIAN_SINGLE #define RECONSTRUCT_MATRIX RECONSTRUCT_LINK_12 __constant__ int path_max_length; void gauge_force_init_cuda(QudaGaugeParam* param, int path_max_length) { #ifdef MULTI_GPU #error "multi gpu is not supported for gauge force computation" #endif static int gauge_force_init_cuda_flag = 0; if (gauge_force_init_cuda_flag){ return; } gauge_force_init_cuda_flag=1; init_kernel_cuda(param); cudaMemcpyToSymbol("path_max_length", &path_max_length, sizeof(int)); } #define COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(mydir, idx) do { \ switch(mydir){ \ case 0: \ new_mem_idx = ( (new_x1==X1m1)?idx-X1m1:idx+1); \ new_x1 = (new_x1==X1m1)?0:new_x1+1; \ break; \ case 1: \ new_mem_idx = ( (new_x2==X2m1)?idx-X2X1mX1:idx+X1); \ new_x2 = (new_x2==X2m1)?0:new_x2+1; \ break; \ case 2: \ new_mem_idx = ( (new_x3==X3m1)?idx-X3X2X1mX2X1:idx+X2X1); \ new_x3 = (new_x3==X3m1)?0:new_x3+1; \ break; \ case 3: \ new_mem_idx = ( (new_x4==X4m1)?idx-X4X3X2X1mX3X2X1:idx+X3X2X1); \ new_x4 = (new_x4==X4m1)?0:new_x4+1; \ break; \ } \ }while(0) #define COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(mydir, idx) do { \ switch(mydir){ \ case 0: \ new_mem_idx = ( (new_x1==0)?idx+X1m1:idx-1); \ new_x1 = (new_x1==0)?X1m1:new_x1 - 1; \ break; \ case 1: \ new_mem_idx = ( (new_x2==0)?idx+X2X1mX1:idx-X1); \ new_x2 = (new_x2==0)?X2m1:new_x2 - 1; \ break; \ case 2: \ new_mem_idx = ( (new_x3==0)?idx+X3X2X1mX2X1:idx-X2X1); \ new_x3 = (new_x3==0)?X3m1:new_x3 - 1; \ break; \ case 3: \ new_mem_idx = ( (new_x4==0)?idx+X4X3X2X1mX3X2X1:idx-X3X2X1); \ new_x4 = (new_x4==0)?X4m1:new_x4 - 1; \ break; \ } \ }while(0) #define GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, i1,i2,i3,i4) do { \ sign =1; \ switch(dir){ \ case XUP: \ if ( (i4 & 1) == 1){ \ sign = 1; \ } \ break; \ case YUP: \ if ( ((i4+i1) & 1) == 1){ \ sign = 1; \ } \ break; \ case ZUP: \ if ( ((i4+i1+i2) & 1) == 1){ \ sign = 1; \ } \ break; \ case TUP: \ if (i4 == X4m1 ){ \ sign = 1; \ } \ break; \ } \ }while (0) //for now we only consider 12-reconstruct and single precision template<int oddBit> __global__ void parity_compute_gauge_force_kernel(float2* momEven, float2* momOdd, int dir, double eb3, float4* linkEven, float4* linkOdd, int* input_path, int* length, float* path_coeff, int num_paths) { int i,j=0; int sid = blockIdx.x * blockDim.x + threadIdx.x; int z1 = sid / X1h; int x1h = sid - z1*X1h; int z2 = z1 / X2; int x2 = z1 - z2*X2; int x4 = z2 / X3; int x3 = z2 - x4*X3; int x1odd = (x2 + x3 + x4 + oddBit) & 1; int x1 = 2*x1h + x1odd; int X = 2*sid + x1odd; int sign = 1; float2* mymom=momEven; if (oddBit){ mymom = momOdd; } float4 LINKA0, LINKA1, LINKA2, LINKA3, LINKA4; float4 LINKB0, LINKB1, LINKB2, LINKB3, LINKB4; float2 STAPLE0, STAPLE1, STAPLE2, STAPLE3,STAPLE4, STAPLE5, STAPLE6, STAPLE7, STAPLE8; float2 AH0, AH1, AH2, AH3, AH4; int new_mem_idx; SET_SU3_MATRIX(staple, 0); for(i=0;i < num_paths; i++){ int nbr_oddbit = (oddBit^1 ); int new_x1 =x1; int new_x2 =x2; int new_x3 =x3; int new_x4 =x4; COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(dir, X); //linka: current matrix //linkb: the loaded matrix in this round SET_UNIT_SU3_MATRIX(linka); int* path = input_path + i*path_max_length; int lnkdir; int path0 = path[0]; if (GOES_FORWARDS(path0)){ lnkdir=path0; }else{ lnkdir=OPP_DIR(path0); COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(path0), new_mem_idx); 
nbr_oddbit = nbr_oddbit^1; } int nbr_idx = new_mem_idx >>1; if (nbr_oddbit){ LOAD_ODD_MATRIX( lnkdir, nbr_idx, LINKB); }else{ LOAD_EVEN_MATRIX( lnkdir, nbr_idx, LINKB); } GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4); RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb); if (GOES_FORWARDS(path0)){ COPY_SU3_MATRIX(linkb, linka); COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(path0, new_mem_idx); nbr_oddbit = nbr_oddbit^1; }else{ SU3_ADJOINT(linkb, linka); } for(j=1; j < length[i]; j++){ int lnkdir; int pathj = path[j]; if (GOES_FORWARDS(pathj)){ lnkdir=pathj; }else{ lnkdir=OPP_DIR(pathj); COMPUTE_NEW_FULL_IDX_MINUS_UPDATE(OPP_DIR(pathj), new_mem_idx); nbr_oddbit = nbr_oddbit^1; } int nbr_idx = new_mem_idx >>1; if (nbr_oddbit){ LOAD_ODD_MATRIX(lnkdir, nbr_idx, LINKB); }else{ LOAD_EVEN_MATRIX(lnkdir, nbr_idx, LINKB); } GF_COMPUTE_RECONSTRUCT_SIGN(sign, lnkdir, new_x1, new_x2, new_x3, new_x4); RECONSTRUCT_MATRIX(lnkdir, nbr_idx, sign, linkb); if (GOES_FORWARDS(pathj)){ MULT_SU3_NN_TEST(linka, linkb); COMPUTE_NEW_FULL_IDX_PLUS_UPDATE(pathj, new_mem_idx); nbr_oddbit = nbr_oddbit^1; }else{ MULT_SU3_NA_TEST(linka, linkb); } }//j SCALAR_MULT_ADD_SU3_MATRIX(staple, linka, path_coeff[i], staple); }//i //update mom if (oddBit){ LOAD_ODD_MATRIX(dir, sid, LINKA); }else{ LOAD_EVEN_MATRIX(dir, sid, LINKA); } GF_COMPUTE_RECONSTRUCT_SIGN(sign, dir, x1, x2, x3, x4); RECONSTRUCT_MATRIX(dir, sid, sign, linka); MULT_SU3_NN_TEST(linka, staple); LOAD_ANTI_HERMITIAN(mymom, dir, sid, AH); UNCOMPRESS_ANTI_HERMITIAN(ah, linkb); SCALAR_MULT_SUB_SU3_MATRIX(linkb, linka, eb3, linka); MAKE_ANTI_HERMITIAN(linka, ah); WRITE_ANTI_HERMITIAN(mymom, dir, sid, AH); return; } void gauge_force_cuda(FullMom cudaMom, int dir, double eb3, FullGauge cudaSiteLink, QudaGaugeParam* param, int** input_path, int* length, void* path_coeff, int num_paths, int max_length) { int i, j; //input_path int bytes = num_paths*max_length* sizeof(int); int* input_path_d; cudaMalloc((void**)&input_path_d, bytes); checkCudaError(); cudaMemset(input_path_d, 0, bytes);checkCudaError(); int* input_path_h = (int*)malloc(bytes); if (input_path_h == NULL){ printf("ERROR: malloc failed for input_path_h in function %s\n", __FUNCTION__); exit(1); } memset(input_path_h, 0, bytes); for(i=0;i < num_paths;i++){ for(j=0; j < length[i]; j++){ input_path_h[i*max_length + j] =input_path[i][j]; } } cudaMemcpy(input_path_d, input_path_h, bytes, cudaMemcpyHostToDevice); checkCudaError(); //length int* length_d; cudaMalloc((void**)&length_d, num_paths*sizeof(int)); checkCudaError(); cudaMemcpy(length_d, length, num_paths*sizeof(int), cudaMemcpyHostToDevice); checkCudaError(); //path_coeff int gsize; if (param->cuda_prec == QUDA_DOUBLE_PRECISION){ gsize = sizeof(double); }else{ gsize= sizeof(float); } void* path_coeff_d; cudaMalloc((void**)&path_coeff_d, num_paths*gsize); checkCudaError(); cudaMemcpy(path_coeff_d, path_coeff, num_paths*gsize, cudaMemcpyHostToDevice); checkCudaError(); //compute the gauge forces int volume = param->X[0]*param->X[1]*param->X[2]*param->X[3]; dim3 blockDim(BLOCK_DIM, 1,1); dim3 gridDim(volume/blockDim.x, 1, 1); dim3 halfGridDim(volume/(2*blockDim.x), 1, 1); float2* momEven = (float2*)cudaMom.even; float2* momOdd = (float2*)cudaMom.odd; float4* linkEven = (float4*)cudaSiteLink.even; float4* linkOdd = (float4*)cudaSiteLink.odd; cudaBindTexture(0, siteLink0TexSingle_recon, cudaSiteLink.even, cudaSiteLink.bytes); cudaBindTexture(0, siteLink1TexSingle_recon, cudaSiteLink.odd, cudaSiteLink.bytes); 
parity_compute_gauge_force_kernel<0><<<halfGridDim, blockDim>>>(momEven, momOdd, dir, eb3, linkEven, linkOdd, input_path_d, length_d, (float*)path_coeff_d, num_paths); //odd /* The reason we do not switch the even/odd function input paramemters and the texture binding * is that we use the oddbit to decided where to load, in the kernel function */ parity_compute_gauge_force_kernel<1><<<halfGridDim, blockDim>>>(momEven, momOdd, dir, eb3, linkEven, linkOdd, input_path_d, length_d, (float*)path_coeff_d, num_paths); cudaUnbindTexture(siteLink0TexSingle_recon); cudaUnbindTexture(siteLink1TexSingle_recon); checkCudaError(); cudaFree(input_path_d); checkCudaError(); free(input_path_h); cudaFree(length_d); cudaFree(path_coeff_d); } #undef LOAD_EVEN_MATRIX #undef LOAD_ODD_MATRIX #undef LOAD_MATRIX #undef LOAD_ANTI_HERMITIAN #undef WRITE_ANTI_HERMITIAN #undef RECONSTRUCT_MATRIX
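The MULT_SU3_*_TEST macros above unroll 3x3 complex matrix products into scalar register arithmetic, which makes them hard to eyeball. A minimal host-side reference for the plain product case follows, assuming (as the kernel's usage of MULT_SU3_NN_TEST(linka, linkb) suggests) that the macro overwrites its first operand with the product of the two matrices; the names and test values below are illustrative and not part of the file above.

#include <complex>
#include <cstdio>

using cfloat = std::complex<float>;

// Reference: C = A * B for 3x3 complex matrices, the operation the register
// macros compute term by term (fa_re/fa_im etc. hold one output entry at a time).
static void su3_mult_nn_ref(const cfloat A[3][3], const cfloat B[3][3], cfloat C[3][3]) {
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j) {
            cfloat acc(0.f, 0.f);
            for (int k = 0; k < 3; ++k)
                acc += A[i][k] * B[k][j];
            C[i][j] = acc;
        }
}

int main() {
    cfloat A[3][3], B[3][3], C[3][3];
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j) {
            A[i][j] = cfloat(0.1f * (i + 1), 0.01f * (j + 1));
            B[i][j] = (i == j) ? cfloat(1.f, 0.f) : cfloat(0.f, 0.f);  // identity
        }
    su3_mult_nn_ref(A, B, C);
    // With B the identity, C must reproduce A exactly.
    std::printf("C[1][2] = (%g, %g)\n", C[1][2].real(), C[1][2].imag());
    return 0;
}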
ecdf7163b1a014d3436a6f08b9daadfc147ff639.hip
// !!! This is a file automatically generated by hipify!!! // to test for orders 1 to 10: // for N in `seq 1 10` ; do nvcc -Dp_N=$N -arch=sm_60 --use_fast_math -o dgemm dgemm.cu -lcublas -lm; ./dgemm ; done #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #define dfloat double #ifndef p_N #define p_N 4 #endif #define p_Np ((int)((p_N+1)*(p_N+2))/2) // scraped from recent #if p_N==1 #define p_cubNp 6 #endif #if p_N==2 #define p_cubNp 12 #endif #if p_N==3 #define p_cubNp 19 #endif #if p_N==4 #define p_cubNp 36 #endif #if p_N==5 #define p_cubNp 54 #endif #if p_N==6 #define p_cubNp 73 #endif #if p_N==7 #define p_cubNp 93 #endif #if p_N==8 #define p_cubNp 118 #endif #if p_N==9 #define p_cubNp 145 #endif #if p_N==10 #define p_cubNp 256 #endif #define p_Nvgeo 7 #define p_RXID 0 #define p_RYID 1 #define p_SXID 2 #define p_SYID 3 __global__ void volumeFlux(const int Nelements, const dfloat * __restrict__ vgeo, const dfloat * __restrict__ q, dfloat * __restrict__ rhsq ){ const int e = blockIdx.x; const int t = threadIdx.x; const int id = t + e*p_cubNp*4; const dfloat rx = vgeo[e*p_Nvgeo + p_RXID]; const dfloat ry = vgeo[e*p_Nvgeo + p_RYID]; const dfloat sx = vgeo[e*p_Nvgeo + p_SXID]; const dfloat sy = vgeo[e*p_Nvgeo + p_SYID]; const dfloat un = q[id + 0*p_cubNp]; const dfloat vn = q[id + 1*p_cubNp]; const dfloat udn = q[id + 2*p_cubNp]; const dfloat vdn = q[id + 3*p_cubNp]; const dfloat f11 = un*udn; const dfloat f12 = vn*udn; const dfloat f21 = un*vdn; const dfloat f22 = vn*vdn; rhsq[id + 0*p_cubNp] = rx*f11 + ry*f12; rhsq[id + 1*p_cubNp] = sx*f11 + sy*f12; rhsq[id + 2*p_cubNp] = rx*f21 + ry*f22; rhsq[id + 3*p_cubNp] = sx*f21 + sy*f22; } void gpuFillRand(int N, dfloat **h_v, dfloat **c_v){ *h_v = (dfloat*) calloc(N, sizeof(dfloat)); for(int n=0;n<N;++n) h_v[0][n] = drand48(); hipMalloc(c_v, N*sizeof(dfloat)); hipMemcpy(*c_v, *h_v, N*sizeof(dfloat), hipMemcpyHostToDevice); } void gpuBlasGemm(hipblasHandle_t &handle, const dfloat *A, const dfloat *B, dfloat *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const dfloat alf = 1; const dfloat bet = 0; const dfloat *alpha = &alf; const dfloat *beta = &bet; // Do the actual multiplication if(sizeof(dfloat)==8) hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, (double*)alpha, (double*)A, lda, (double*)B, ldb, (double*)beta, (double*)C, ldc); else hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, (float*)alpha, (float*)A, lda, (float*)B, ldb, (float*)beta, (float*)C, ldc); } int main(int argc, char **argv){ int Nelements = (argc==1) ? 
10000:atoi(argv[1]); // Write exact element number int Np = p_Np; int Ncub = p_cubNp; // fields q = (u,v,ud,vd) dfloat *h_q, *h_cq; dfloat *d_q, *d_cq; // geofacs dfloat *h_vgeo; dfloat *d_vgeo; // matrices dfloat *h_cI, *h_Div; dfloat *d_cI, *d_Div; // results dfloat *h_flux, *h_rhs; dfloat *d_flux, *d_rhs; // allocate geofacs gpuFillRand(p_Nvgeo*Nelements, &h_vgeo, &d_vgeo); // allocate arrays for matrices gpuFillRand(Ncub*Np, &h_cI, &d_cI); gpuFillRand(2*Ncub*Np, &h_Div, &d_Div); // allocate arrays for data gpuFillRand(4*Np*Nelements, &h_q, &d_q); gpuFillRand(4*Ncub*Nelements, &h_cq, &d_cq); gpuFillRand(4*Ncub*Nelements, &h_flux, &d_flux); gpuFillRand(2*Np*Nelements, &h_rhs, &d_rhs); // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); // create events hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); int Niterations = 100; for(int it=0;it<Niterations;++it){ // interpolate from nodes to cubature gpuBlasGemm(handle, d_cI, d_q, d_cq, Ncub, Np, 4*Nelements); // compute volume fluxes dim3 G(Nelements,1,1); dim3 B(p_cubNp,1,1); hipLaunchKernelGGL(( volumeFlux), dim3(G), dim3(B) , 0, 0, Nelements, d_vgeo, d_cq, d_flux); // compute divergence gpuBlasGemm(handle, d_Div, d_flux, d_rhs, Np, 2*Ncub, 2*Nelements); } hipEventRecord(stop); hipEventSynchronize(stop); float elapsed; hipEventElapsedTime(&elapsed, start, stop); elapsed /= (Niterations*1000.); // minimal amount of data that could have moved (excluding matrices) long long int minData = (4*Np + 2*Np + p_Nvgeo )*sizeof(dfloat); long long int actData = (4*Np + 4*Ncub + 4*Ncub + 4*Ncub + 4*Ncub + 2*Np)*sizeof(dfloat); //long long int minFlops = (2*Np*Ncub*4 + Ncub*16 + 2*Np*Ncub*4); long long int minFlops = (2*Np*Ncub*4 + Ncub*4 + 2*Np*Ncub*4 + 8*Np); double GIG = 1024*1024*1024; double minBW = Nelements*(minData/elapsed)/GIG; double actBW = Nelements*(actData/elapsed)/GIG; double gflops = Nelements*(minFlops/elapsed)/GIG; printf("N=%d, K=%d, elapsed = %5.7E, minBW = %5.7E, actBW (est) = %5.7E, estGF = %5.7E\n", p_N, Nelements, elapsed, minBW, actBW, gflops); printf("%d %d %5.7E %5.7E %5.7E %5.7E\n", p_N, Nelements, elapsed, minBW, actBW, gflops); // Destroy the handle hipblasDestroy(handle); exit(0); return 0; }
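The bandwidth and flop estimates printed at the end of main() above pack several unit conventions into one expression (bytes per element, seconds per iteration, and a 1024^3 divisor for both GB/s and "GFLOP/s"). The host-only sketch below reproduces that bookkeeping for the default p_N = 4 sizes so the units are explicit; the elapsed time here is a placeholder, not a measured value.

#include <cstdio>

int main() {
    const long long Np = 15, Ncub = 36, Nvgeo = 7;   // p_N = 4 sizes from the macros above
    const long long Nelements = 10000;
    const double elapsed = 1.0e-3;                   // seconds per iteration (made up)

    // Same expressions as the benchmark: minimal data moved per element, minimal flops.
    long long minData  = (4 * Np + 2 * Np + Nvgeo) * (long long)sizeof(double);
    long long minFlops = 2 * Np * Ncub * 4 + Ncub * 4 + 2 * Np * Ncub * 4 + 8 * Np;

    const double GIG = 1024.0 * 1024.0 * 1024.0;     // binary scaling, as in the benchmark
    double minBW  = Nelements * (minData  / elapsed) / GIG;  // GB/s lower bound
    double gflops = Nelements * (minFlops / elapsed) / GIG;  // "GFLOP/s" with 2^30 scaling
    std::printf("minBW = %.3f GB/s, est. GFLOP/s = %.3f\n", minBW, gflops);
    return 0;
}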
ecdf7163b1a014d3436a6f08b9daadfc147ff639.cu
// to test for orders 1 to 10: // for N in `seq 1 10` ; do nvcc -Dp_N=$N -arch=sm_60 --use_fast_math -o dgemm dgemm.cu -lcublas -lm; ./dgemm ; done #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cublas_v2.h> #include <curand.h> #define dfloat double #ifndef p_N #define p_N 4 #endif #define p_Np ((int)((p_N+1)*(p_N+2))/2) // scraped from recent #if p_N==1 #define p_cubNp 6 #endif #if p_N==2 #define p_cubNp 12 #endif #if p_N==3 #define p_cubNp 19 #endif #if p_N==4 #define p_cubNp 36 #endif #if p_N==5 #define p_cubNp 54 #endif #if p_N==6 #define p_cubNp 73 #endif #if p_N==7 #define p_cubNp 93 #endif #if p_N==8 #define p_cubNp 118 #endif #if p_N==9 #define p_cubNp 145 #endif #if p_N==10 #define p_cubNp 256 #endif #define p_Nvgeo 7 #define p_RXID 0 #define p_RYID 1 #define p_SXID 2 #define p_SYID 3 __global__ void volumeFlux(const int Nelements, const dfloat * __restrict__ vgeo, const dfloat * __restrict__ q, dfloat * __restrict__ rhsq ){ const int e = blockIdx.x; const int t = threadIdx.x; const int id = t + e*p_cubNp*4; const dfloat rx = vgeo[e*p_Nvgeo + p_RXID]; const dfloat ry = vgeo[e*p_Nvgeo + p_RYID]; const dfloat sx = vgeo[e*p_Nvgeo + p_SXID]; const dfloat sy = vgeo[e*p_Nvgeo + p_SYID]; const dfloat un = q[id + 0*p_cubNp]; const dfloat vn = q[id + 1*p_cubNp]; const dfloat udn = q[id + 2*p_cubNp]; const dfloat vdn = q[id + 3*p_cubNp]; const dfloat f11 = un*udn; const dfloat f12 = vn*udn; const dfloat f21 = un*vdn; const dfloat f22 = vn*vdn; rhsq[id + 0*p_cubNp] = rx*f11 + ry*f12; rhsq[id + 1*p_cubNp] = sx*f11 + sy*f12; rhsq[id + 2*p_cubNp] = rx*f21 + ry*f22; rhsq[id + 3*p_cubNp] = sx*f21 + sy*f22; } void gpuFillRand(int N, dfloat **h_v, dfloat **c_v){ *h_v = (dfloat*) calloc(N, sizeof(dfloat)); for(int n=0;n<N;++n) h_v[0][n] = drand48(); cudaMalloc(c_v, N*sizeof(dfloat)); cudaMemcpy(*c_v, *h_v, N*sizeof(dfloat), cudaMemcpyHostToDevice); } void gpuBlasGemm(cublasHandle_t &handle, const dfloat *A, const dfloat *B, dfloat *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const dfloat alf = 1; const dfloat bet = 0; const dfloat *alpha = &alf; const dfloat *beta = &bet; // Do the actual multiplication if(sizeof(dfloat)==8) cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, (double*)alpha, (double*)A, lda, (double*)B, ldb, (double*)beta, (double*)C, ldc); else cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, (float*)alpha, (float*)A, lda, (float*)B, ldb, (float*)beta, (float*)C, ldc); } int main(int argc, char **argv){ int Nelements = (argc==1) ? 
10000:atoi(argv[1]); // Write exact element number int Np = p_Np; int Ncub = p_cubNp; // fields q = (u,v,ud,vd) dfloat *h_q, *h_cq; dfloat *d_q, *d_cq; // geofacs dfloat *h_vgeo; dfloat *d_vgeo; // matrices dfloat *h_cI, *h_Div; dfloat *d_cI, *d_Div; // results dfloat *h_flux, *h_rhs; dfloat *d_flux, *d_rhs; // allocate geofacs gpuFillRand(p_Nvgeo*Nelements, &h_vgeo, &d_vgeo); // allocate arrays for matrices gpuFillRand(Ncub*Np, &h_cI, &d_cI); gpuFillRand(2*Ncub*Np, &h_Div, &d_Div); // allocate arrays for data gpuFillRand(4*Np*Nelements, &h_q, &d_q); gpuFillRand(4*Ncub*Nelements, &h_cq, &d_cq); gpuFillRand(4*Ncub*Nelements, &h_flux, &d_flux); gpuFillRand(2*Np*Nelements, &h_rhs, &d_rhs); // Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); // create events cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int Niterations = 100; for(int it=0;it<Niterations;++it){ // interpolate from nodes to cubature gpuBlasGemm(handle, d_cI, d_q, d_cq, Ncub, Np, 4*Nelements); // compute volume fluxes dim3 G(Nelements,1,1); dim3 B(p_cubNp,1,1); volumeFlux<<< G, B >>> (Nelements, d_vgeo, d_cq, d_flux); // compute divergence gpuBlasGemm(handle, d_Div, d_flux, d_rhs, Np, 2*Ncub, 2*Nelements); } cudaEventRecord(stop); cudaEventSynchronize(stop); float elapsed; cudaEventElapsedTime(&elapsed, start, stop); elapsed /= (Niterations*1000.); // minimal amount of data that could have moved (excluding matrices) long long int minData = (4*Np + 2*Np + p_Nvgeo )*sizeof(dfloat); long long int actData = (4*Np + 4*Ncub + 4*Ncub + 4*Ncub + 4*Ncub + 2*Np)*sizeof(dfloat); //long long int minFlops = (2*Np*Ncub*4 + Ncub*16 + 2*Np*Ncub*4); long long int minFlops = (2*Np*Ncub*4 + Ncub*4 + 2*Np*Ncub*4 + 8*Np); double GIG = 1024*1024*1024; double minBW = Nelements*(minData/elapsed)/GIG; double actBW = Nelements*(actData/elapsed)/GIG; double gflops = Nelements*(minFlops/elapsed)/GIG; printf("N=%d, K=%d, elapsed = %5.7E, minBW = %5.7E, actBW (est) = %5.7E, estGF = %5.7E\n", p_N, Nelements, elapsed, minBW, actBW, gflops); printf("%d %d %5.7E %5.7E %5.7E %5.7E\n", p_N, Nelements, elapsed, minBW, actBW, gflops); // Destroy the handle cublasDestroy(handle); exit(0); return 0; }
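Both versions of this benchmark lean on the BLAS column-major convention: gpuBlasGemm(handle, A, B, C, m, k, n) requests C(m x n) = A(m x k) * B(k x n) with lda = m, ldb = k, ldc = m, so the interpolation call multiplies the Ncub x Np cubature matrix by an Np x (4*Nelements) block of nodal fields. A small host sketch of that exact mapping follows; the matrix sizes and fill values are made up for illustration.

#include <cstdio>
#include <vector>

// C (m x n) = A (m x k) * B (k x n), all column-major with lda = m, ldb = k, ldc = m,
// i.e. the same convention as cublasDgemm(..., CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, ...).
static void gemm_colmajor(int m, int n, int k,
                          const double *A, const double *B, double *C) {
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < m; ++row) {
            double acc = 0.0;
            for (int p = 0; p < k; ++p)
                acc += A[row + p * m] * B[p + col * k];
            C[row + col * m] = acc;
        }
}

int main() {
    const int Ncub = 3, Np = 2, Nfields = 4, Nelements = 1;  // tiny stand-in sizes
    const int n = Nfields * Nelements;
    std::vector<double> cI(Ncub * Np, 1.0);   // interpolation matrix (all ones here)
    std::vector<double> q(Np * n, 0.5);       // nodal data
    std::vector<double> cq(Ncub * n, 0.0);    // cubature data
    gemm_colmajor(Ncub, n, Np, cI.data(), q.data(), cq.data());
    std::printf("cq[0] = %g (expect Np * 0.5 = %g)\n", cq[0], Np * 0.5);
    return 0;
}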
d5fa24219b0f7ead0b21645b4ce3839994ea6f18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "functionKernels.h" #define foo(a,b) b?tanh(a):exp(a) __global__ void function(double * __restrict__ A, double * __restrict__ C, int ARows, int ACols, int CRows, int CCols, long val) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= CRows || j >= ACols || j >= CCols) return; int Cindex = (i * CCols) + (j + val); int Aindex = (i * ACols) + j; C[Cindex] = foo(A[Aindex], val); } __global__ void gradient_function(double * __restrict__ A, double * __restrict__ C, int ARows, int ACols, int CRows, int CCols) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= CRows || j > ACols || j >= CCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + (j+1); C[Cindex] = A[Aindex] * (1 - pow( tanh(C[Cindex]), 2) ); } __global__ void error_function(double* __restrict__ A, double* __restrict__ B, double* __restrict__ C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= BRows || i >= CRows || j >= ACols || j >= BCols || j >= BCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + j; int Bindex = (i * BCols) + j; C[Cindex] = A[Aindex] - B[Bindex]; } __global__ void reduction_function(double * __restrict__ A, double * __restrict__ C, int ARows, int ACols, int Clength) { int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y; __shared__ double As[BLOCK_SIZE][BLOCK_SIZE]; As[threadIdx.y][threadIdx.x] = 0.0; for (int k = 0; k < (BLOCK_SIZE + ACols - 1)/BLOCK_SIZE; k++) { if (k*BLOCK_SIZE + threadIdx.x < ACols && Row < ARows) { As[threadIdx.y][threadIdx.x] += A[Row*ACols + k*BLOCK_SIZE + threadIdx.x]; //printf("%d += A[%d]\n", threadIdx.x, Row*ACols + k*BLOCK_SIZE + threadIdx.x); } __syncthreads(); } if (Row < Clength && threadIdx.x == 0) { double CValue = 0.0; for(int n=0;n<BLOCK_SIZE;n++){ CValue += As[n][n]; } C[Row] = CValue; // printf("30 %d = %lf\n", (Row), CValue); // printf("ARows: %d\n",ARows); } } __global__ void normalize(double* __restrict__ A, double* __restrict__ B, double* __restrict__ C, int ARows, int ACols, int Blength, int CRows, int CCols) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= Blength || i >= CRows || j >= ACols || j >= CCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + j; C[Cindex] = A[Aindex] / B[i]; } __global__ void delta_function(double* __restrict__ A, double* __restrict__ C, int ARows, int ACols, int CRows, int CCols, double val) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= CRows || j >= ACols || j >= CCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + j; C[Cindex] -= val * A[Aindex]; }
d5fa24219b0f7ead0b21645b4ce3839994ea6f18.cu
#include "functionKernels.h" #define foo(a,b) b?tanh(a):exp(a) __global__ void function(double * __restrict__ A, double * __restrict__ C, int ARows, int ACols, int CRows, int CCols, long val) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= CRows || j >= ACols || j >= CCols) return; int Cindex = (i * CCols) + (j + val); int Aindex = (i * ACols) + j; C[Cindex] = foo(A[Aindex], val); } __global__ void gradient_function(double * __restrict__ A, double * __restrict__ C, int ARows, int ACols, int CRows, int CCols) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= CRows || j > ACols || j >= CCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + (j+1); C[Cindex] = A[Aindex] * (1 - pow( tanh(C[Cindex]), 2) ); } __global__ void error_function(double* __restrict__ A, double* __restrict__ B, double* __restrict__ C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= BRows || i >= CRows || j >= ACols || j >= BCols || j >= BCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + j; int Bindex = (i * BCols) + j; C[Cindex] = A[Aindex] - B[Bindex]; } __global__ void reduction_function(double * __restrict__ A, double * __restrict__ C, int ARows, int ACols, int Clength) { int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y; __shared__ double As[BLOCK_SIZE][BLOCK_SIZE]; As[threadIdx.y][threadIdx.x] = 0.0; for (int k = 0; k < (BLOCK_SIZE + ACols - 1)/BLOCK_SIZE; k++) { if (k*BLOCK_SIZE + threadIdx.x < ACols && Row < ARows) { As[threadIdx.y][threadIdx.x] += A[Row*ACols + k*BLOCK_SIZE + threadIdx.x]; //printf("%d += A[%d]\n", threadIdx.x, Row*ACols + k*BLOCK_SIZE + threadIdx.x); } __syncthreads(); } if (Row < Clength && threadIdx.x == 0) { double CValue = 0.0; for(int n=0;n<BLOCK_SIZE;n++){ CValue += As[n][n]; } C[Row] = CValue; // printf("30 %d = %lf\n", (Row), CValue); // printf("ARows: %d\n",ARows); } } __global__ void normalize(double* __restrict__ A, double* __restrict__ B, double* __restrict__ C, int ARows, int ACols, int Blength, int CRows, int CCols) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= Blength || i >= CRows || j >= ACols || j >= CCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + j; C[Cindex] = A[Aindex] / B[i]; } __global__ void delta_function(double* __restrict__ A, double* __restrict__ C, int ARows, int ACols, int CRows, int CCols, double val) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i >= ARows || i >= CRows || j >= ACols || j >= CCols) return; int Cindex = (i * CCols) + j; int Aindex = (i * ACols) + j; C[Cindex] -= val * A[Aindex]; }
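These element-wise kernels take the row index from blockIdx.y/threadIdx.y and the column index from blockIdx.x/threadIdx.x, and rely on the in-kernel bounds checks when the matrix dimensions are not multiples of the block size, but the file ships no launcher. The sketch below shows one plausible launch configuration with a stand-in kernel that uses the same indexing; BLOCK_SIZE is defined in functionKernels.h, which is not shown, so 16 is assumed here.

#include <cstdio>
#include <cuda_runtime.h>

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16  // assumption: matches the value in functionKernels.h
#endif

// Minimal stand-in kernel with the same indexing convention as error_function.
__global__ void subtract(const double *A, const double *B, double *C,
                         int rows, int cols) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= rows || j >= cols) return;
    C[i * cols + j] = A[i * cols + j] - B[i * cols + j];
}

int main() {
    const int rows = 100, cols = 37;  // deliberately not multiples of BLOCK_SIZE
    const size_t bytes = rows * cols * sizeof(double);
    double *A, *B, *C;
    cudaMallocManaged(&A, bytes);
    cudaMallocManaged(&B, bytes);
    cudaMallocManaged(&C, bytes);
    for (int n = 0; n < rows * cols; ++n) { A[n] = 2.0; B[n] = 0.5; }

    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((cols + BLOCK_SIZE - 1) / BLOCK_SIZE,   // x covers columns
              (rows + BLOCK_SIZE - 1) / BLOCK_SIZE);  // y covers rows
    subtract<<<grid, block>>>(A, B, C, rows, cols);
    cudaDeviceSynchronize();
    std::printf("C[0] = %g (expect 1.5)\n", C[0]);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}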
b09f2c7ee4d9a087c2d53825e601f089d48cd5e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************************************//** * @file * * @section LICENCE * * Mathematica source file * * Copyright 1986 through 2010 by Wolfram Research Inc. * * @section DESCRIPTION * * * * $Id$ ************************************************************************/ /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <wgl.h> #include <stdio.h> #include <stdlib.h> #include <hipfft.h> #ifdef DEBUG #include <assert.h> #define Assert(x) assert(x) #define Expect(x) assert(x) #else #define Assert(x) #define Expect(x) (void)(x) #endif #ifndef __func__ #ifdef __FUNCTION__ #define __func__ __FUNCTION__ #else #define __func__ __FILE__ #endif #endif #ifdef DEBUG #define PRINT_DBG(...) \ printf(__VA_ARGS__) #define LOG(...) \ printf("One line %d in %s:%s ----", __LINE__, __FILE__, __func__); \ PRINT_DBG(__VA_ARGS__); \ printf("\n"); #else #define PRINT_DBG(...) #define LOG(...) #endif #ifdef DEBUG #define cuLOG(msg) \ if (hipGetLastError() != hipSuccess) { \ LOG(msg); \ } #else #define cuLOG(msg) #endif #ifndef cuSafeCall #define cuSafeCall(stmt) \ { \ hipError_t cutilErr = stmt; \ if (cutilErr != hipSuccess) { \ LOG(" "); \ printf("%s\n", hipGetErrorString(cutilErr)); \ } \ } #endif // CUDA example code that implements the frequency space version of // Jos Stam's paper 'Stable Fluids' in 2D. This application uses the // CUDA FFT library (CUFFT) to perform velocity diffusion and to // force non-divergence in the velocity field at each time step. It uses // CUDA-OpenGL interoperability to update the particle field directly // instead of doing a copy to system memory before drawing. Texture is // used for automatic bilinear interpolation at the velocity advection step. 
#define SetDim(dim) \ DIM = dim; \ DS = dim*dim; \ CPADW = dim/2+1; \ RPADW = 2*(dim/2+1); \ PDS = DIM*CPADW; \ wWidth = max(1024, (int) DIM); \ wHeight = max(1024, (int) DIM) #define MAX_EPSILON_ERROR 1.0f mint DIM = 128; // Square size of solver domain mint DS = (DIM*DIM); // Total domain size mint CPADW = (DIM/2+1); // Padded width for real->complex in-place FFT mint RPADW = (2*(DIM/2+1)); // Padded width for real->complex in-place FFT mint PDS = (DIM*CPADW); // Padded total domain size float DT = 0.1f; // Delta T for interative solver float VIS = 0.0025f; // Viscosity constant float FORCE = (5.8f*DIM); // Force scale factor int FR = 4; // Force update radius #define TILEX 64 // Tile width #define TILEY 64 // Tile height #define TIDSX 64 // Tids in X #define TIDSY 4 // Tids in Y // Vector data type used to velocity and force fields typedef float2 cData; // Texture reference for reading velocity field texture<float2, 2> texref; static hipArray *array = NULL; // CUFFT plan handle static hipfftHandle planr2c; static hipfftHandle planc2r; static cData *vxfield = NULL; static cData *vyfield = NULL; cData *hvfield = NULL; cData *dvfield = NULL; mint wWidth = max(1024, (int) DIM); mint wHeight = max(1024, (int) DIM); static int clicked = 0; // Particle data static cData *deviceParticles = NULL; // particle positions in host memory static cData *hostParticles = NULL; // particle positions in host memory static int lastx = 0, lasty = 0; // Texture pitch size_t tPitch = 0; // Now this is compatible with gcc in 64-bit static void setTimeDelta(float newDT); static void setViscosity(float newVIS); static void setForce(float newFORCE); static void setForceRadius(mint newFR); static void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r); static void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt); static void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc); static void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy); static void advectParticles(cData *v, int dx, int dy, float dt); static cData * simulateFluids(mint dim); static void resetParticles(); static void setParticles(double * data, mint n); static void click(int button, int updown, int x, int y); static void motion(int x, int y); static void FluidDynamics_cleanup(); static void initialize(mint dim); //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // from cutil_math.h //////////////////////////////////////////////////////////////////////////////// // float functions //////////////////////////////////////////////////////////////////////////////// // lerp inline __device__ __host__ float lerp(float a, float b, float t) { return a + t*(b-a); } // clamp inline __device__ __host__ float clamp(float f, float a, float b) { return fmaxf(a, fminf(f, b)); } //////////////////////////////////////////////////////////////////////////////// // float2 functions //////////////////////////////////////////////////////////////////////////////// // additional constructors inline __host__ __device__ float2 make_float2(float s) { return make_float2(s, s); } inline __host__ __device__ float2 make_float2(int2 a) { return make_float2(float(a.x), float(a.y)); } // negate inline __host__ __device__ float2 operator-(float2 &a) { return make_float2(-a.x, -a.y); } // addition inline __host__ __device__ float2 operator+(float2 a, float2 b) { return 
make_float2(a.x + b.x, a.y + b.y); } inline __host__ __device__ void operator+=(float2 &a, float2 b) { a.x += b.x; a.y += b.y; } // subtract inline __host__ __device__ float2 operator-(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); } inline __host__ __device__ void operator-=(float2 &a, float2 b) { a.x -= b.x; a.y -= b.y; } // multiply inline __host__ __device__ float2 operator*(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); } inline __host__ __device__ float2 operator*(float2 a, float s) { return make_float2(a.x * s, a.y * s); } inline __host__ __device__ float2 operator*(float s, float2 a) { return make_float2(a.x * s, a.y * s); } inline __host__ __device__ void operator*=(float2 &a, float s) { a.x *= s; a.y *= s; } // divide inline __host__ __device__ float2 operator/(float2 a, float2 b) { return make_float2(a.x / b.x, a.y / b.y); } inline __host__ __device__ float2 operator/(float2 a, float s) { float inv = 1.0f / s; return a * inv; } inline __host__ __device__ float2 operator/(float s, float2 a) { float inv = 1.0f / s; return a * inv; } inline __host__ __device__ void operator/=(float2 &a, float s) { float inv = 1.0f / s; a *= inv; } // lerp inline __device__ __host__ float2 lerp(float2 a, float2 b, float t) { return a + t*(b-a); } // clamp inline __device__ __host__ float2 clamp(float2 v, float a, float b) { return make_float2(clamp(v.x, a, b), clamp(v.y, a, b)); } inline __device__ __host__ float2 clamp(float2 v, float2 a, float2 b) { return make_float2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } // dot product inline __host__ __device__ float dot(float2 a, float2 b) { return a.x * b.x + a.y * b.y; } // length inline __host__ __device__ float length(float2 v) { return sqrtf(dot(v, v)); } // normalize inline __host__ __device__ float2 normalize(float2 v) { float invLen = rsqrtf(dot(v, v)); return v * invLen; } // floor inline __host__ __device__ float2 floor(const float2 v) { return make_float2(floor(v.x), floor(v.y)); } // reflect inline __host__ __device__ float2 reflect(float2 i, float2 n) { return i - 2.0f * n * dot(n,i); } // absolute value inline __host__ __device__ float2 fabs(float2 v) { return make_float2(fabs(v.x), fabs(v.y)); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void setupTexture(int x, int y) { hipError_t err; // Wrap mode appears to be the new default texref.filterMode = hipFilterModeLinear; hipChannelFormatDesc desc = hipCreateChannelDesc<float2>(); err = hipMallocArray(&array, &desc, y, x); Expect(err == hipSuccess); } void bindTexture(void) { if (array != NULL) { hipError_t err; err = hipBindTextureToArray(texref, array); Expect(err == hipSuccess); } } void unbindTexture(void) { if (array != NULL) { hipError_t err; err = hipUnbindTexture(texref); Expect(err == hipSuccess); } } void updateTexture(cData *data, size_t wib, size_t h, size_t pitch) { if (array != NULL) { hipError_t err; err = hipMemcpy2DToArray(array, 0, 0, data, pitch, wib, h, hipMemcpyDeviceToDevice); Expect(err == hipSuccess); } } void deleteTexture(void) { if (array != NULL) { hipFreeArray(array); } array = NULL; } // Note that these kernels are designed to work with arbitrary // domain sizes, not just domains that are multiples of the tile // size. Therefore, we have extra code that checks to make sure // a given thread location falls within the domain boundaries in // both X and Y. 
Also, the domain is covered by looping over // multiple elements in the Y direction, while there is a one-to-one // mapping between threads in X and the tile size in X. // Nolan Goodnight 9/22/06 // This method adds constant force vectors to the velocity field // stored in 'v' according to v(x,t+1) = v(x,t) + dt * f. __global__ void addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch) { int tx = threadIdx.x; int ty = threadIdx.y; cData *fj = (cData*)((char*)v + (ty + spy) * pitch) + tx + spx; cData vterm = *fj; tx -= r; ty -= r; float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty); vterm.x += s * fx; vterm.y += s * fy; *fj = vterm; } // This method performs the velocity advection step, where we // trace velocity vectors back in time to update each grid cell. // That is, v(x,t+1) = v(p(x,-dt),t). Here we perform bilinear // interpolation in the velocity space. __global__ void advectVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData vterm, ploc; float vxterm, vyterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * pdx + gtidx; vterm = tex2D(texref, (float)gtidx, (float)fi); ploc.x = (gtidx + 0.5f) - (dt * vterm.x * dx); ploc.y = (fi + 0.5f) - (dt * vterm.y * dy); vterm = tex2D(texref, ploc.x, ploc.y); vxterm = vterm.x; vyterm = vterm.y; vx[fj] = vxterm; vy[fj] = vyterm; } } } } // This method performs velocity diffusion and forces mass conservation // in the frequency domain. The inputs 'vx' and 'vy' are complex-valued // arrays holding the Fourier coefficients of the velocity field in // X and Y. Diffusion in this space takes a simple form described as: // v(k,t) = v(k,t) / (1 + visc * dt * k^2), where visc is the viscosity, // and k is the wavenumber. The projection step forces the Fourier // velocity vectors to be orthogonal to the vectors for each // wavenumber: v(k,t) = v(k,t) - ((k dot v(k,t) * k) / k^2. __global__ void diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt, float visc, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData xterm, yterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; xterm = vx[fj]; yterm = vy[fj]; // Compute the index of the wavenumber based on the // data order produced by a standard NN FFT. int iix = gtidx; int iiy = (fi>dy/2)?(fi-(dy)):fi; // Velocity diffusion float kk = (float)(iix * iix + iiy * iiy); // k^2 float diff = 1.f / (1.f + visc * dt * kk); xterm.x *= diff; xterm.y *= diff; yterm.x *= diff; yterm.y *= diff; // Velocity projection if (kk > 0.f) { float rkk = 1.f / kk; // Real portion of velocity projection float rkp = (iix * xterm.x + iiy * yterm.x); // Imaginary portion of velocity projection float ikp = (iix * xterm.y + iiy * yterm.y); xterm.x -= rkk * rkp * iix; xterm.y -= rkk * ikp * iix; yterm.x -= rkk * rkp * iiy; yterm.y -= rkk * ikp * iiy; } vx[fj] = xterm; vy[fj] = yterm; } } } } // This method updates the velocity field 'v' using the two complex // arrays from the previous step: 'vx' and 'vy'. 
Here we scale the // real components by 1/(dx*dy) to account for an unnormalized FFT. __global__ void updateVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; float vxterm, vyterm; cData nvterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fjr = fi * pdx + gtidx; vxterm = vx[fjr]; vyterm = vy[fjr]; // Normalize the result of the inverse FFT float scale = 1.f / (dx * dy); nvterm.x = vxterm * scale; nvterm.y = vyterm * scale; cData *fj = (cData*)((char*)v + fi * pitch) + gtidx; *fj = nvterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } // This method updates the hostParticles by moving particle positions // according to the velocity field and time step. That is, for each // particle: p(t+1) = p(t) + dt * v(p(t)). __global__ void advectParticles_k(cData *part, cData *v, int dx, int dy, float dt, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; // gtidx is the domain location in x for this thread cData pterm, vterm; if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; pterm = part[fj]; int xvi = ((int)(pterm.x * dx)); int yvi = ((int)(pterm.y * dy)); vterm = *((cData*)((char*)v + yvi * pitch) + xvi); pterm.x += dt * vterm.x; pterm.x = pterm.x - (int)pterm.x; pterm.x += 1.f; pterm.x = pterm.x - (int)pterm.x; pterm.y += dt * vterm.y; pterm.y = pterm.y - (int)pterm.y; pterm.y += 1.f; pterm.y = pterm.y - (int)pterm.y; part[fj] = pterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } static void setTimeDelta(float newDT) { DT = newDT; return ; } static void setViscosity(float newVIS) { VIS = newVIS; return ; } static void setForce(float newFORCE) { FORCE = newFORCE*DIM; return ; } static void setForceRadius(mint newFR) { FR = newFR; return ; } static void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r) { dim3 tids(2*r+1, 2*r+1); hipLaunchKernelGGL(( addForces_k), dim3(1), dim3(tids), 0, 0, v, dx, dy, spx, spy, fx, fy, r, tPitch); } static void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateTexture(v, DIM*sizeof(cData), DIM, tPitch); hipLaunchKernelGGL(( advectVelocity_k), dim3(grid), dim3(tids), 0, 0, v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY); } static void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc) { // Forward FFT hipfftExecR2C(planr2c, (hipfftReal*)vx, (hipfftComplex*)vx); hipfftExecR2C(planr2c, (hipfftReal*)vy, (hipfftComplex*)vy); uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1), 1); uint3 tids = make_uint3(TIDSX, TIDSY, 1); hipLaunchKernelGGL(( diffuseProject_k), dim3(grid), dim3(tids), 0, 0, vx, vy, dx, dy, dt, visc, TILEY/TIDSY); // Inverse FFT hipfftExecC2R(planc2r, (hipfftComplex*)vx, (hipfftReal*)vx); hipfftExecC2R(planc2r, (hipfftComplex*)vy, (hipfftReal*)vy); } static void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy) { dim3 
grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); hipLaunchKernelGGL(( updateVelocity_k), dim3(grid), dim3(tids), 0, 0, v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch); } static void advectParticles(cData *v, int dx, int dy, float dt) { hipError_t err; dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); hipLaunchKernelGGL(( advectParticles_k), dim3(grid), dim3(tids), 0, 0, deviceParticles, v, dx, dy, dt, TILEY/TIDSY, tPitch); err = hipMemcpy(hostParticles, deviceParticles, DS*sizeof(cData), hipMemcpyDeviceToHost); Expect(err == hipSuccess); } static cData * simulateFluids(mint dim) { if (dim != DIM && dim != -1) { FluidDynamics_cleanup(); initialize(dim); } // simulate fluid advectVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM, DT); diffuseProject(vxfield, vyfield, CPADW, DIM, DT, VIS); updateVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM); advectParticles(dvfield, DIM, DIM, DT); return hostParticles; } static void resetParticles() { hipError_t err; int i, j; memset(hvfield, 0, sizeof(cData) * DS); hipMemcpy(dvfield, hvfield, sizeof(cData) * DS, hipMemcpyHostToDevice); for (i = 0; i < DIM; i++) { for (j = 0; j < DIM; j++) { hostParticles[i*DIM+j].x = (j+0.5f+(rand()/(float)RAND_MAX - 0.5f))/DIM; hostParticles[i*DIM+j].y = (i+0.5f+(rand()/(float)RAND_MAX - 0.5f))/DIM; } } err = hipMemcpy(deviceParticles, hostParticles, DS*sizeof(cData), hipMemcpyHostToDevice); Expect(err == hipSuccess); } static void setParticles(double * data, mint n) { hipError_t err; int i, j; memset(hvfield, 0, sizeof(cData) * DS); hipMemcpy(dvfield, hvfield, sizeof(cData) * DS, hipMemcpyHostToDevice); for (i = 0; i < DIM; i++) { for (j = 0; j < DIM; j++) { if (2*(i*DIM + j) < n) { hostParticles[i*DIM+j].x = static_cast<float>(data[2*(i*DIM + j)]); hostParticles[i*DIM+j].y = static_cast<float>(data[2*(i*DIM + j) + 1]); } } } err = hipMemcpy(deviceParticles, hostParticles, DS*sizeof(cData), hipMemcpyHostToDevice); Expect(err == hipSuccess); } static void click(int button, int updown, int x, int y) { lastx = x; lasty = y; clicked = !clicked; } static void motion(int x, int y) { // Convert motion coordinates to domain float fx = (lastx / (float)wWidth); float fy = (lasty / (float)wHeight); int nx = (int)(fx * DIM); int ny = (int)(fy * DIM); if (clicked && nx < DIM-FR && nx > FR-1 && ny < DIM-FR && ny > FR-1) { int ddx = x - lastx; int ddy = y - lasty; fx = ddx / (float)wWidth; fy = ddy / (float)wHeight; int spy = ny-FR; int spx = nx-FR; addForces(dvfield, DIM, DIM, spx, spy, FORCE * DT * fx, FORCE * DT * fy, FR); lastx = x; lasty = y; } } static void FluidDynamics_cleanup() { unbindTexture(); deleteTexture(); // Free all host and device resources if (hvfield != NULL) { free(hvfield); hvfield = NULL; } if (hostParticles != NULL) { free(hostParticles); hostParticles = NULL; } if (dvfield != NULL) { hipFree(dvfield); dvfield = NULL; } if (deviceParticles != NULL) { hipFree(deviceParticles); deviceParticles = NULL; } if (vxfield != NULL) { hipFree(vxfield); vxfield = NULL; } if (vyfield != NULL) { hipFree(vyfield); vyfield = NULL; } hipfftDestroy(planr2c); hipfftDestroy(planc2r); } static void initialize(mint dim) { if (dim != -1) { SetDim(dim); } hvfield = (cData*)malloc(sizeof(cData) * DS); memset(hvfield, 0, sizeof(cData) * DS); // Allocate and initialize device data hipMallocPitch((void**)&dvfield, &tPitch, sizeof(cData)*DIM, DIM); hipMemcpy(dvfield, hvfield, sizeof(cData) * DS, 
hipMemcpyHostToDevice); // Temporary complex velocity field data hipMalloc((void**)&vxfield, sizeof(cData) * PDS); hipMalloc((void**)&vyfield, sizeof(cData) * PDS); setupTexture(DIM, DIM); bindTexture(); // Create particle array hostParticles = (cData*)malloc(sizeof(cData) * DS); hipMalloc((void **) &deviceParticles, sizeof(cData) * DS); memset(hostParticles, 0, sizeof(cData) * DS); resetParticles(); // Create CUFFT transform plan configuration hipfftPlan2d(&planr2c, DIM, DIM, HIPFFT_R2C); hipfftPlan2d(&planc2r, DIM, DIM, HIPFFT_C2R); // TODO: update kernels to use the new unpadded memory layout for perf // rather than the old FFTW-compatible layout cufftSetCompatibilityMode(planr2c, HIPFFT_COMPATIBILITY_FFTW_PADDING); cufftSetCompatibilityMode(planc2r, HIPFFT_COMPATIBILITY_FFTW_PADDING); } EXTERN_C DLLEXPORT int oFluidDynamics_SetTimeDelta(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { float val; Assert(Argc == 1); val = static_cast<float>(MArgument_getReal(Args[0])); setTimeDelta(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetViscosity(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { float val; Assert(Argc == 1); val = static_cast<float>(MArgument_getReal(Args[0])); setViscosity(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetForce(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { float val; Assert(Argc == 1); val = static_cast<float>(MArgument_getReal(Args[0])); setForce(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetForceRadius(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint val; Assert(Argc == 1); val = MArgument_getInteger(Args[0]); setForceRadius(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_Initialize(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint dim; dim = MArgument_getInteger(Args[0]); initialize(dim); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_Motion(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint x, y; x = MArgument_getInteger(Args[0]); y = MArgument_getInteger(Args[1]); motion(x, y); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_MouseMovement(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint button, updown, x, y; button = MArgument_getInteger(Args[0]); updown = MArgument_getInteger(Args[1]); x = MArgument_getInteger(Args[2]); y = MArgument_getInteger(Args[3]); click(button, updown, x, y); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_StepAsParticles(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { MTensor tensor; cData * mem; mint dim; tensor = MArgument_getMTensor(Args[0]); dim = MArgument_getInteger(Args[1]); mem = simulateFluids(dim); Expect(mem != NULL); for (mint ii = 0; ii < libData->MTensor_getFlattenedLength(tensor)/2; ii++) { libData->MTensor_getRealData(tensor)[2*ii] = static_cast<double>(mem[ii].x); libData->MTensor_getRealData(tensor)[2*ii+1] = static_cast<double>(mem[ii].y); } MArgument_setMTensor(Res, tensor); libData->MTensor_disown(tensor); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_StepAsPixels(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint dim; MTensor tensor; cData * mem; double * tensorData; tensor = MArgument_getMTensor(Args[0]); dim = MArgument_getInteger(Args[1]); mem = simulateFluids(dim); Expect(mem != NULL); tensorData = 
libData->MTensor_getRealData(tensor); memset(tensorData, 0, libData->MTensor_getFlattenedLength(tensor)*sizeof(double)); for (mint ii = 0; ii < libData->MTensor_getFlattenedLength(tensor); ii++) { tensorData[(int) mem[ii].x + ((int) mem[ii].y * libData->MTensor_getDimensions(tensor)[1])] += 1; } MArgument_setMTensor(Res, tensor); libData->MTensor_disown(tensor); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_ResetParticles(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { resetParticles(); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetParticles(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { MTensor tensor; tensor = MArgument_getMTensor(Args[0]); setParticles(libData->MTensor_getRealData(tensor), libData->MTensor_getFlattenedLength(tensor)); libData->MTensor_disown(tensor); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT mint oFluidDynamics_Uninitialize(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { FluidDynamics_cleanup(); return LIBRARY_NO_ERROR; }
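The heart of this solver is diffuseProject_k, which damps each Fourier mode by 1/(1 + visc*dt*k^2) and then projects the velocity onto the plane orthogonal to the wavenumber so the field stays divergence-free. A host-only sketch of that per-mode update follows; the struct name and test values are illustrative, and the negative-wavenumber fold (iiy = fi - dy) is left out for brevity.

#include <cstdio>

struct Complex2 { float re, im; };  // one complex Fourier coefficient

static void diffuse_project_mode(int kx, int ky, float dt, float visc,
                                 Complex2 &vx, Complex2 &vy) {
    float kk = float(kx * kx + ky * ky);         // k^2
    float diff = 1.f / (1.f + visc * dt * kk);   // diffusion factor
    vx.re *= diff; vx.im *= diff;
    vy.re *= diff; vy.im *= diff;
    if (kk > 0.f) {                              // projection: remove the component along k
        float rkk = 1.f / kk;
        float rkp = kx * vx.re + ky * vy.re;     // real part of k . v
        float ikp = kx * vx.im + ky * vy.im;     // imaginary part of k . v
        vx.re -= rkk * rkp * kx; vx.im -= rkk * ikp * kx;
        vy.re -= rkk * rkp * ky; vy.im -= rkk * ikp * ky;
    }
}

int main() {
    // A mode whose velocity is parallel to k: projection should remove it entirely.
    Complex2 vx = {1.f, 0.f}, vy = {0.f, 0.f};
    diffuse_project_mode(/*kx=*/1, /*ky=*/0, /*dt=*/0.1f, /*visc=*/0.0025f, vx, vy);
    std::printf("vx = (%g, %g), vy = (%g, %g)\n", vx.re, vx.im, vy.re, vy.im);
    // Expect vx ~ (0, 0): a purely longitudinal mode is projected away.
    return 0;
}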
b09f2c7ee4d9a087c2d53825e601f089d48cd5e8.cu
/*********************************************************************//** * @file * * @section LICENCE * * Mathematica source file * * Copyright 1986 through 2010 by Wolfram Research Inc. * * @section DESCRIPTION * * * * $Id$ ************************************************************************/ /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <wgl.h> #include <stdio.h> #include <stdlib.h> #include <cufft.h> #ifdef DEBUG #include <assert.h> #define Assert(x) assert(x) #define Expect(x) assert(x) #else #define Assert(x) #define Expect(x) (void)(x) #endif #ifndef __func__ #ifdef __FUNCTION__ #define __func__ __FUNCTION__ #else #define __func__ __FILE__ #endif #endif #ifdef DEBUG #define PRINT_DBG(...) \ printf(__VA_ARGS__) #define LOG(...) \ printf("One line %d in %s:%s ----", __LINE__, __FILE__, __func__); \ PRINT_DBG(__VA_ARGS__); \ printf("\n"); #else #define PRINT_DBG(...) #define LOG(...) #endif #ifdef DEBUG #define cuLOG(msg) \ if (cudaGetLastError() != cudaSuccess) { \ LOG(msg); \ } #else #define cuLOG(msg) #endif #ifndef cuSafeCall #define cuSafeCall(stmt) \ { \ cudaError_t cutilErr = stmt; \ if (cutilErr != cudaSuccess) { \ LOG(" "); \ printf("%s\n", cudaGetErrorString(cutilErr)); \ } \ } #endif // CUDA example code that implements the frequency space version of // Jos Stam's paper 'Stable Fluids' in 2D. This application uses the // CUDA FFT library (CUFFT) to perform velocity diffusion and to // force non-divergence in the velocity field at each time step. It uses // CUDA-OpenGL interoperability to update the particle field directly // instead of doing a copy to system memory before drawing. Texture is // used for automatic bilinear interpolation at the velocity advection step. 
#define SetDim(dim) \ DIM = dim; \ DS = dim*dim; \ CPADW = dim/2+1; \ RPADW = 2*(dim/2+1); \ PDS = DIM*CPADW; \ wWidth = max(1024, (int) DIM); \ wHeight = max(1024, (int) DIM) #define MAX_EPSILON_ERROR 1.0f mint DIM = 128; // Square size of solver domain mint DS = (DIM*DIM); // Total domain size mint CPADW = (DIM/2+1); // Padded width for real->complex in-place FFT mint RPADW = (2*(DIM/2+1)); // Padded width for real->complex in-place FFT mint PDS = (DIM*CPADW); // Padded total domain size float DT = 0.1f; // Delta T for interative solver float VIS = 0.0025f; // Viscosity constant float FORCE = (5.8f*DIM); // Force scale factor int FR = 4; // Force update radius #define TILEX 64 // Tile width #define TILEY 64 // Tile height #define TIDSX 64 // Tids in X #define TIDSY 4 // Tids in Y // Vector data type used to velocity and force fields typedef float2 cData; // Texture reference for reading velocity field texture<float2, 2> texref; static cudaArray *array = NULL; // CUFFT plan handle static cufftHandle planr2c; static cufftHandle planc2r; static cData *vxfield = NULL; static cData *vyfield = NULL; cData *hvfield = NULL; cData *dvfield = NULL; mint wWidth = max(1024, (int) DIM); mint wHeight = max(1024, (int) DIM); static int clicked = 0; // Particle data static cData *deviceParticles = NULL; // particle positions in host memory static cData *hostParticles = NULL; // particle positions in host memory static int lastx = 0, lasty = 0; // Texture pitch size_t tPitch = 0; // Now this is compatible with gcc in 64-bit static void setTimeDelta(float newDT); static void setViscosity(float newVIS); static void setForce(float newFORCE); static void setForceRadius(mint newFR); static void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r); static void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt); static void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc); static void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy); static void advectParticles(cData *v, int dx, int dy, float dt); static cData * simulateFluids(mint dim); static void resetParticles(); static void setParticles(double * data, mint n); static void click(int button, int updown, int x, int y); static void motion(int x, int y); static void FluidDynamics_cleanup(); static void initialize(mint dim); //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // from cutil_math.h //////////////////////////////////////////////////////////////////////////////// // float functions //////////////////////////////////////////////////////////////////////////////// // lerp inline __device__ __host__ float lerp(float a, float b, float t) { return a + t*(b-a); } // clamp inline __device__ __host__ float clamp(float f, float a, float b) { return fmaxf(a, fminf(f, b)); } //////////////////////////////////////////////////////////////////////////////// // float2 functions //////////////////////////////////////////////////////////////////////////////// // additional constructors inline __host__ __device__ float2 make_float2(float s) { return make_float2(s, s); } inline __host__ __device__ float2 make_float2(int2 a) { return make_float2(float(a.x), float(a.y)); } // negate inline __host__ __device__ float2 operator-(float2 &a) { return make_float2(-a.x, -a.y); } // addition inline __host__ __device__ float2 operator+(float2 a, float2 b) { return 
make_float2(a.x + b.x, a.y + b.y); } inline __host__ __device__ void operator+=(float2 &a, float2 b) { a.x += b.x; a.y += b.y; } // subtract inline __host__ __device__ float2 operator-(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); } inline __host__ __device__ void operator-=(float2 &a, float2 b) { a.x -= b.x; a.y -= b.y; } // multiply inline __host__ __device__ float2 operator*(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); } inline __host__ __device__ float2 operator*(float2 a, float s) { return make_float2(a.x * s, a.y * s); } inline __host__ __device__ float2 operator*(float s, float2 a) { return make_float2(a.x * s, a.y * s); } inline __host__ __device__ void operator*=(float2 &a, float s) { a.x *= s; a.y *= s; } // divide inline __host__ __device__ float2 operator/(float2 a, float2 b) { return make_float2(a.x / b.x, a.y / b.y); } inline __host__ __device__ float2 operator/(float2 a, float s) { float inv = 1.0f / s; return a * inv; } inline __host__ __device__ float2 operator/(float s, float2 a) { float inv = 1.0f / s; return a * inv; } inline __host__ __device__ void operator/=(float2 &a, float s) { float inv = 1.0f / s; a *= inv; } // lerp inline __device__ __host__ float2 lerp(float2 a, float2 b, float t) { return a + t*(b-a); } // clamp inline __device__ __host__ float2 clamp(float2 v, float a, float b) { return make_float2(clamp(v.x, a, b), clamp(v.y, a, b)); } inline __device__ __host__ float2 clamp(float2 v, float2 a, float2 b) { return make_float2(clamp(v.x, a.x, b.x), clamp(v.y, a.y, b.y)); } // dot product inline __host__ __device__ float dot(float2 a, float2 b) { return a.x * b.x + a.y * b.y; } // length inline __host__ __device__ float length(float2 v) { return sqrtf(dot(v, v)); } // normalize inline __host__ __device__ float2 normalize(float2 v) { float invLen = rsqrtf(dot(v, v)); return v * invLen; } // floor inline __host__ __device__ float2 floor(const float2 v) { return make_float2(floor(v.x), floor(v.y)); } // reflect inline __host__ __device__ float2 reflect(float2 i, float2 n) { return i - 2.0f * n * dot(n,i); } // absolute value inline __host__ __device__ float2 fabs(float2 v) { return make_float2(fabs(v.x), fabs(v.y)); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// void setupTexture(int x, int y) { cudaError_t err; // Wrap mode appears to be the new default texref.filterMode = cudaFilterModeLinear; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>(); err = cudaMallocArray(&array, &desc, y, x); Expect(err == cudaSuccess); } void bindTexture(void) { if (array != NULL) { cudaError_t err; err = cudaBindTextureToArray(texref, array); Expect(err == cudaSuccess); } } void unbindTexture(void) { if (array != NULL) { cudaError_t err; err = cudaUnbindTexture(texref); Expect(err == cudaSuccess); } } void updateTexture(cData *data, size_t wib, size_t h, size_t pitch) { if (array != NULL) { cudaError_t err; err = cudaMemcpy2DToArray(array, 0, 0, data, pitch, wib, h, cudaMemcpyDeviceToDevice); Expect(err == cudaSuccess); } } void deleteTexture(void) { if (array != NULL) { cudaFreeArray(array); } array = NULL; } // Note that these kernels are designed to work with arbitrary // domain sizes, not just domains that are multiples of the tile // size. Therefore, we have extra code that checks to make sure // a given thread location falls within the domain boundaries in // both X and Y. 
Also, the domain is covered by looping over // multiple elements in the Y direction, while there is a one-to-one // mapping between threads in X and the tile size in X. // Nolan Goodnight 9/22/06 // This method adds constant force vectors to the velocity field // stored in 'v' according to v(x,t+1) = v(x,t) + dt * f. __global__ void addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch) { int tx = threadIdx.x; int ty = threadIdx.y; cData *fj = (cData*)((char*)v + (ty + spy) * pitch) + tx + spx; cData vterm = *fj; tx -= r; ty -= r; float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty); vterm.x += s * fx; vterm.y += s * fy; *fj = vterm; } // This method performs the velocity advection step, where we // trace velocity vectors back in time to update each grid cell. // That is, v(x,t+1) = v(p(x,-dt),t). Here we perform bilinear // interpolation in the velocity space. __global__ void advectVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData vterm, ploc; float vxterm, vyterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * pdx + gtidx; vterm = tex2D(texref, (float)gtidx, (float)fi); ploc.x = (gtidx + 0.5f) - (dt * vterm.x * dx); ploc.y = (fi + 0.5f) - (dt * vterm.y * dy); vterm = tex2D(texref, ploc.x, ploc.y); vxterm = vterm.x; vyterm = vterm.y; vx[fj] = vxterm; vy[fj] = vyterm; } } } } // This method performs velocity diffusion and forces mass conservation // in the frequency domain. The inputs 'vx' and 'vy' are complex-valued // arrays holding the Fourier coefficients of the velocity field in // X and Y. Diffusion in this space takes a simple form described as: // v(k,t) = v(k,t) / (1 + visc * dt * k^2), where visc is the viscosity, // and k is the wavenumber. The projection step forces the Fourier // velocity vectors to be orthogonal to the vectors for each // wavenumber: v(k,t) = v(k,t) - ((k dot v(k,t) * k) / k^2. __global__ void diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt, float visc, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData xterm, yterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; xterm = vx[fj]; yterm = vy[fj]; // Compute the index of the wavenumber based on the // data order produced by a standard NN FFT. int iix = gtidx; int iiy = (fi>dy/2)?(fi-(dy)):fi; // Velocity diffusion float kk = (float)(iix * iix + iiy * iiy); // k^2 float diff = 1.f / (1.f + visc * dt * kk); xterm.x *= diff; xterm.y *= diff; yterm.x *= diff; yterm.y *= diff; // Velocity projection if (kk > 0.f) { float rkk = 1.f / kk; // Real portion of velocity projection float rkp = (iix * xterm.x + iiy * yterm.x); // Imaginary portion of velocity projection float ikp = (iix * xterm.y + iiy * yterm.y); xterm.x -= rkk * rkp * iix; xterm.y -= rkk * ikp * iix; yterm.x -= rkk * rkp * iiy; yterm.y -= rkk * ikp * iiy; } vx[fj] = xterm; vy[fj] = yterm; } } } } // This method updates the velocity field 'v' using the two complex // arrays from the previous step: 'vx' and 'vy'. 
Here we scale the // real components by 1/(dx*dy) to account for an unnormalized FFT. __global__ void updateVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; float vxterm, vyterm; cData nvterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fjr = fi * pdx + gtidx; vxterm = vx[fjr]; vyterm = vy[fjr]; // Normalize the result of the inverse FFT float scale = 1.f / (dx * dy); nvterm.x = vxterm * scale; nvterm.y = vyterm * scale; cData *fj = (cData*)((char*)v + fi * pitch) + gtidx; *fj = nvterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } // This method updates the hostParticles by moving particle positions // according to the velocity field and time step. That is, for each // particle: p(t+1) = p(t) + dt * v(p(t)). __global__ void advectParticles_k(cData *part, cData *v, int dx, int dy, float dt, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; // gtidx is the domain location in x for this thread cData pterm, vterm; if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; pterm = part[fj]; int xvi = ((int)(pterm.x * dx)); int yvi = ((int)(pterm.y * dy)); vterm = *((cData*)((char*)v + yvi * pitch) + xvi); pterm.x += dt * vterm.x; pterm.x = pterm.x - (int)pterm.x; pterm.x += 1.f; pterm.x = pterm.x - (int)pterm.x; pterm.y += dt * vterm.y; pterm.y = pterm.y - (int)pterm.y; pterm.y += 1.f; pterm.y = pterm.y - (int)pterm.y; part[fj] = pterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } static void setTimeDelta(float newDT) { DT = newDT; return ; } static void setViscosity(float newVIS) { VIS = newVIS; return ; } static void setForce(float newFORCE) { FORCE = newFORCE*DIM; return ; } static void setForceRadius(mint newFR) { FR = newFR; return ; } static void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r) { dim3 tids(2*r+1, 2*r+1); addForces_k<<<1, tids>>>(v, dx, dy, spx, spy, fx, fy, r, tPitch); } static void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateTexture(v, DIM*sizeof(cData), DIM, tPitch); advectVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY); } static void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc) { // Forward FFT cufftExecR2C(planr2c, (cufftReal*)vx, (cufftComplex*)vx); cufftExecR2C(planr2c, (cufftReal*)vy, (cufftComplex*)vy); uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1), 1); uint3 tids = make_uint3(TIDSX, TIDSY, 1); diffuseProject_k<<<grid, tids>>>(vx, vy, dx, dy, dt, visc, TILEY/TIDSY); // Inverse FFT cufftExecC2R(planc2r, (cufftComplex*)vx, (cufftReal*)vx); cufftExecC2R(planc2r, (cufftComplex*)vy, (cufftReal*)vy); } static void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateVelocity_k<<<grid, tids>>>(v, vx, vy, 
dx, pdx, dy, TILEY/TIDSY, tPitch); } static void advectParticles(cData *v, int dx, int dy, float dt) { cudaError_t err; dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); advectParticles_k<<<grid, tids>>>(deviceParticles, v, dx, dy, dt, TILEY/TIDSY, tPitch); err = cudaMemcpy(hostParticles, deviceParticles, DS*sizeof(cData), cudaMemcpyDeviceToHost); Expect(err == cudaSuccess); } static cData * simulateFluids(mint dim) { if (dim != DIM && dim != -1) { FluidDynamics_cleanup(); initialize(dim); } // simulate fluid advectVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM, DT); diffuseProject(vxfield, vyfield, CPADW, DIM, DT, VIS); updateVelocity(dvfield, (float*)vxfield, (float*)vyfield, DIM, RPADW, DIM); advectParticles(dvfield, DIM, DIM, DT); return hostParticles; } static void resetParticles() { cudaError_t err; int i, j; memset(hvfield, 0, sizeof(cData) * DS); cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice); for (i = 0; i < DIM; i++) { for (j = 0; j < DIM; j++) { hostParticles[i*DIM+j].x = (j+0.5f+(rand()/(float)RAND_MAX - 0.5f))/DIM; hostParticles[i*DIM+j].y = (i+0.5f+(rand()/(float)RAND_MAX - 0.5f))/DIM; } } err = cudaMemcpy(deviceParticles, hostParticles, DS*sizeof(cData), cudaMemcpyHostToDevice); Expect(err == cudaSuccess); } static void setParticles(double * data, mint n) { cudaError_t err; int i, j; memset(hvfield, 0, sizeof(cData) * DS); cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice); for (i = 0; i < DIM; i++) { for (j = 0; j < DIM; j++) { if (2*(i*DIM + j) < n) { hostParticles[i*DIM+j].x = static_cast<float>(data[2*(i*DIM + j)]); hostParticles[i*DIM+j].y = static_cast<float>(data[2*(i*DIM + j) + 1]); } } } err = cudaMemcpy(deviceParticles, hostParticles, DS*sizeof(cData), cudaMemcpyHostToDevice); Expect(err == cudaSuccess); } static void click(int button, int updown, int x, int y) { lastx = x; lasty = y; clicked = !clicked; } static void motion(int x, int y) { // Convert motion coordinates to domain float fx = (lastx / (float)wWidth); float fy = (lasty / (float)wHeight); int nx = (int)(fx * DIM); int ny = (int)(fy * DIM); if (clicked && nx < DIM-FR && nx > FR-1 && ny < DIM-FR && ny > FR-1) { int ddx = x - lastx; int ddy = y - lasty; fx = ddx / (float)wWidth; fy = ddy / (float)wHeight; int spy = ny-FR; int spx = nx-FR; addForces(dvfield, DIM, DIM, spx, spy, FORCE * DT * fx, FORCE * DT * fy, FR); lastx = x; lasty = y; } } static void FluidDynamics_cleanup() { unbindTexture(); deleteTexture(); // Free all host and device resources if (hvfield != NULL) { free(hvfield); hvfield = NULL; } if (hostParticles != NULL) { free(hostParticles); hostParticles = NULL; } if (dvfield != NULL) { cudaFree(dvfield); dvfield = NULL; } if (deviceParticles != NULL) { cudaFree(deviceParticles); deviceParticles = NULL; } if (vxfield != NULL) { cudaFree(vxfield); vxfield = NULL; } if (vyfield != NULL) { cudaFree(vyfield); vyfield = NULL; } cufftDestroy(planr2c); cufftDestroy(planc2r); } static void initialize(mint dim) { if (dim != -1) { SetDim(dim); } hvfield = (cData*)malloc(sizeof(cData) * DS); memset(hvfield, 0, sizeof(cData) * DS); // Allocate and initialize device data cudaMallocPitch((void**)&dvfield, &tPitch, sizeof(cData)*DIM, DIM); cudaMemcpy(dvfield, hvfield, sizeof(cData) * DS, cudaMemcpyHostToDevice); // Temporary complex velocity field data cudaMalloc((void**)&vxfield, sizeof(cData) * PDS); cudaMalloc((void**)&vyfield, sizeof(cData) * PDS); setupTexture(DIM, DIM); bindTexture(); // 
Create particle array hostParticles = (cData*)malloc(sizeof(cData) * DS); cudaMalloc((void **) &deviceParticles, sizeof(cData) * DS); memset(hostParticles, 0, sizeof(cData) * DS); resetParticles(); // Create CUFFT transform plan configuration cufftPlan2d(&planr2c, DIM, DIM, CUFFT_R2C); cufftPlan2d(&planc2r, DIM, DIM, CUFFT_C2R); // TODO: update kernels to use the new unpadded memory layout for perf // rather than the old FFTW-compatible layout cufftSetCompatibilityMode(planr2c, CUFFT_COMPATIBILITY_FFTW_PADDING); cufftSetCompatibilityMode(planc2r, CUFFT_COMPATIBILITY_FFTW_PADDING); } EXTERN_C DLLEXPORT int oFluidDynamics_SetTimeDelta(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { float val; Assert(Argc == 1); val = static_cast<float>(MArgument_getReal(Args[0])); setTimeDelta(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetViscosity(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { float val; Assert(Argc == 1); val = static_cast<float>(MArgument_getReal(Args[0])); setViscosity(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetForce(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { float val; Assert(Argc == 1); val = static_cast<float>(MArgument_getReal(Args[0])); setForce(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetForceRadius(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint val; Assert(Argc == 1); val = MArgument_getInteger(Args[0]); setForceRadius(val); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_Initialize(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint dim; dim = MArgument_getInteger(Args[0]); initialize(dim); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_Motion(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint x, y; x = MArgument_getInteger(Args[0]); y = MArgument_getInteger(Args[1]); motion(x, y); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_MouseMovement(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint button, updown, x, y; button = MArgument_getInteger(Args[0]); updown = MArgument_getInteger(Args[1]); x = MArgument_getInteger(Args[2]); y = MArgument_getInteger(Args[3]); click(button, updown, x, y); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_StepAsParticles(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { MTensor tensor; cData * mem; mint dim; tensor = MArgument_getMTensor(Args[0]); dim = MArgument_getInteger(Args[1]); mem = simulateFluids(dim); Expect(mem != NULL); for (mint ii = 0; ii < libData->MTensor_getFlattenedLength(tensor)/2; ii++) { libData->MTensor_getRealData(tensor)[2*ii] = static_cast<double>(mem[ii].x); libData->MTensor_getRealData(tensor)[2*ii+1] = static_cast<double>(mem[ii].y); } MArgument_setMTensor(Res, tensor); libData->MTensor_disown(tensor); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_StepAsPixels(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { mint dim; MTensor tensor; cData * mem; double * tensorData; tensor = MArgument_getMTensor(Args[0]); dim = MArgument_getInteger(Args[1]); mem = simulateFluids(dim); Expect(mem != NULL); tensorData = libData->MTensor_getRealData(tensor); memset(tensorData, 0, libData->MTensor_getFlattenedLength(tensor)*sizeof(double)); for (mint ii = 0; ii < libData->MTensor_getFlattenedLength(tensor); ii++) { tensorData[(int) 
mem[ii].x + ((int) mem[ii].y * libData->MTensor_getDimensions(tensor)[1])] += 1; } MArgument_setMTensor(Res, tensor); libData->MTensor_disown(tensor); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_ResetParticles(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { resetParticles(); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT int oFluidDynamics_SetParticles(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { MTensor tensor; tensor = MArgument_getMTensor(Args[0]); setParticles(libData->MTensor_getRealData(tensor), libData->MTensor_getFlattenedLength(tensor)); libData->MTensor_disown(tensor); return LIBRARY_NO_ERROR; } EXTERN_C DLLEXPORT mint oFluidDynamics_Uninitialize(WolframLibraryData libData, mint Argc,MArgument * Args, MArgument Res) { FluidDynamics_cleanup(); return LIBRARY_NO_ERROR; }
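// Illustrative sketch, not taken from the record above: a minimal, self-contained
// round trip through cuFFT's in-place real-to-complex / complex-to-real 2D
// transforms, using the same padded row layout the solver encodes with
// CPADW = DIM/2+1 and RPADW = 2*(DIM/2+1). The plain CUDA runtime is assumed,
// the names (N_SIDE, fwd, inv) are local to this sketch, and since recent cuFFT
// releases no longer ship cufftSetCompatibilityMode, the padding is simply laid
// out by hand. Error checking is deliberately minimal.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <cufft.h>

int main() {
    const int N_SIDE = 128;               // square domain, like DIM above
    const int CPAD   = N_SIDE / 2 + 1;    // complex elements per padded row
    const int RPAD   = 2 * CPAD;          // real elements per padded row
    const size_t realCount = (size_t)N_SIDE * RPAD;

    // Host field stored with the padded pitch so the transform can run in place.
    std::vector<float> h(realCount, 0.0f);
    for (int y = 0; y < N_SIDE; ++y)
        for (int x = 0; x < N_SIDE; ++x)
            h[y * RPAD + x] = (float)((x + y) % 7);

    float* d = nullptr;
    cudaMalloc(&d, realCount * sizeof(float));
    cudaMemcpy(d, h.data(), realCount * sizeof(float), cudaMemcpyHostToDevice);

    cufftHandle fwd, inv;
    cufftPlan2d(&fwd, N_SIDE, N_SIDE, CUFFT_R2C);
    cufftPlan2d(&inv, N_SIDE, N_SIDE, CUFFT_C2R);

    cufftExecR2C(fwd, (cufftReal*)d, (cufftComplex*)d);  // real -> half spectrum
    cufftExecC2R(inv, (cufftComplex*)d, (cufftReal*)d);  // back to real, unnormalized

    cudaMemcpy(h.data(), d, realCount * sizeof(float), cudaMemcpyDeviceToHost);

    // cuFFT leaves a factor of N_SIDE*N_SIDE, which is what updateVelocity_k divides out.
    const float scale = 1.0f / ((float)N_SIDE * (float)N_SIDE);
    printf("round-trip sample: %f (expected %f)\n", h[3] * scale, (float)(3 % 7));

    cufftDestroy(fwd);
    cufftDestroy(inv);
    cudaFree(d);
    return 0;
}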
868bf4477a81d7d31634f3b2d43459a1691aef2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "common.h" #include "type.h" #define TPB 512 #define GET_ID() (threadIdx.x+blockDim.x*blockIdx.x) #define MAX(a,b) ((a)>(b)?(a):(b)) #define CUFREE(x) {if(x)hipFree(x);} static inline void get_gpulen(const int n, int *const gpulen) { if (n > TPB*512) *gpulen = TPB*512; else *gpulen = (int) n; } __global__ void setup_curand_rng(const int seed, hiprandState_t *state, const int gpulen) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx >= gpulen) return; hiprand_init(seed, idx, 0, state + idx); } __global__ void runif_kernel(hiprandState_t *state, const REAL min, const REAL max, const int gpulen, REAL *x) { int idx = threadIdx.x + blockDim.x*blockIdx.x; if (idx >= gpulen) return; REAL tmp = hiprand_uniform(state + idx); x[idx] = min + (max - min)*tmp; } int runif(const unsigned int seed, const int n, const REAL min, const REAL max, REAL *x) { int gpulen; hiprandState_t *state; get_gpulen(n, &gpulen); hipMalloc(&state, gpulen*sizeof(*state)); if (state == NULL) return ERR_CUMALLOC; int runs = (int) MAX((int) n/gpulen, 1); int rem = (int) MAX((n - (int)(runs*gpulen)), 0); int runlen = MAX(gpulen/TPB, 1); hipLaunchKernelGGL(( setup_curand_rng), dim3(runlen), dim3(TPB), 0, 0, seed, state, gpulen); for (int i=0; i<runs; i++) hipLaunchKernelGGL(( runif_kernel), dim3(runlen), dim3(TPB), 0, 0, state, min, max, gpulen, x); if (rem) { runlen = MAX(rem/TPB, 1); hipLaunchKernelGGL(( runif_kernel), dim3(runlen), dim3(TPB), 0, 0, state, min, max, gpulen, x); } hipFree(state); return ERR_OK; } int gen_setup(const int m_local, const int n, REAL **x, REAL **y, REAL **z) { hipMalloc(x, m_local*n*sizeof(**x)); hipMalloc(y, n*sizeof(**y)); hipMalloc(z, m_local*sizeof(**z)); if (*x == NULL || *y == NULL || *z == NULL) return ERR_CUMALLOC; return ERR_OK; }
868bf4477a81d7d31634f3b2d43459a1691aef2d.cu
#include <curand.h> #include <curand_kernel.h> #include "common.h" #include "type.h" #define TPB 512 #define GET_ID() (threadIdx.x+blockDim.x*blockIdx.x) #define MAX(a,b) ((a)>(b)?(a):(b)) #define CUFREE(x) {if(x)cudaFree(x);} static inline void get_gpulen(const int n, int *const gpulen) { if (n > TPB*512) *gpulen = TPB*512; else *gpulen = (int) n; } __global__ void setup_curand_rng(const int seed, curandState *state, const int gpulen) { int idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx >= gpulen) return; curand_init(seed, idx, 0, state + idx); } __global__ void runif_kernel(curandState *state, const REAL min, const REAL max, const int gpulen, REAL *x) { int idx = threadIdx.x + blockDim.x*blockIdx.x; if (idx >= gpulen) return; REAL tmp = curand_uniform(state + idx); x[idx] = min + (max - min)*tmp; } int runif(const unsigned int seed, const int n, const REAL min, const REAL max, REAL *x) { int gpulen; curandState *state; get_gpulen(n, &gpulen); cudaMalloc(&state, gpulen*sizeof(*state)); if (state == NULL) return ERR_CUMALLOC; int runs = (int) MAX((int) n/gpulen, 1); int rem = (int) MAX((n - (int)(runs*gpulen)), 0); int runlen = MAX(gpulen/TPB, 1); setup_curand_rng<<<runlen, TPB>>>(seed, state, gpulen); for (int i=0; i<runs; i++) runif_kernel<<<runlen, TPB>>>(state, min, max, gpulen, x); if (rem) { runlen = MAX(rem/TPB, 1); runif_kernel<<<runlen, TPB>>>(state, min, max, gpulen, x); } cudaFree(state); return ERR_OK; } int gen_setup(const int m_local, const int n, REAL **x, REAL **y, REAL **z) { cudaMalloc(x, m_local*n*sizeof(**x)); cudaMalloc(y, n*sizeof(**y)); cudaMalloc(z, m_local*sizeof(**z)); if (*x == NULL || *y == NULL || *z == NULL) return ERR_CUMALLOC; return ERR_OK; }
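// Illustrative sketch, not taken from the pair above: the same "uniform draws in
// [min, max)" idea, but through the cuRAND host API instead of per-thread
// curandState objects. curandGenerateUniform fills (0, 1]; a small kernel then
// rescales. REAL is assumed to be float purely for this sketch, and error
// handling is omitted for brevity.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand.h>

__global__ void rescale(float* x, int n, float lo, float hi) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = lo + (hi - lo) * x[i];
}

int main() {
    const int n = 1 << 20;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateUniform(gen, d_x, n);              // values in (0, 1]

    rescale<<<(n + 255) / 256, 256>>>(d_x, n, -2.0f, 3.0f);
    cudaDeviceSynchronize();

    float sample = 0.0f;
    cudaMemcpy(&sample, d_x, sizeof(float), cudaMemcpyDeviceToHost);
    printf("first draw in [-2, 3): %f\n", sample);

    curandDestroyGenerator(gen);
    cudaFree(d_x);
    return 0;
}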
c7ed4225f978faec5f4626be3a03d36cda53e08a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include "kernel.hip" int main(int argc, char* argv[]) { float *in_h; float *in_d; unsigned int num_elements, memory_block_size, thread_block_size; int mode, cache_warmup_en; num_elements = 1024 * 1024; memory_block_size = 64; thread_block_size = 512; mode = 0; cache_warmup_en = 0; if (argc == 6) { num_elements = atoi(argv[1]); thread_block_size = atoi(argv[2]); memory_block_size = atoi(argv[3]); mode = atoi(argv[4]); cache_warmup_en = atoi(argv[5]); if (mode < 0 || mode > 7) { printf("ERROR: Mode can only be an integer within [0, 7]!"); exit(0); } } else { printf("\n Invalid input parameters!" "\n Usage: ./atmem_bench [num_elements] [thread_block_size] [memory_block_size] [mode=0..7] [cache_warmup_en=0..1])" "\n"); exit(0); } // Print all parameters printf("Number of elements = %u\nThread Block size = %u\nMemory Block size (Interval in Mode 5) = %u\nCache warmup = %s\nMode = %d (%s)\n", num_elements, thread_block_size, memory_block_size, (cache_warmup_en == 0) ? "DISABLED" : "ENABLED", mode, (mode == 0) ? "BASELINE" : (mode == 1) ? "ATOMIC" : (mode == 2) ? "1 THREAD TO 1 ELEMENT" : (mode == 3) ? "1 WARP TO 1 ELEMENT" : (mode == 4) ? "1 WARP TO 32 ELEMENTS" : (mode == 5) ? "1 WARP TO 32 FAR ELEMENTS" : (mode == 6) ? "1 VECTOR ATOMIC ADD" : (mode == 7) ? "CLOCK64() FUNCTION OVERHEAD" : "ERROR"); // Host array in_h = (float*)malloc(num_elements*sizeof(float)); for (int i = 0; i<num_elements; i++) { in_h[i] = (float)(rand() % 1000) / 100.0; } // Print input #if IS_ARRAY_PRINT_ENABLED == 1 printf("Input: "); for (int i = 0; i<num_elements; i++) { printf("%f ", in_h[i]); } printf("\n"); #endif printf("Allocating device variables...\n"); hipMalloc((void**)&in_d, num_elements * sizeof(float)); hipDeviceSynchronize(); // H to D printf("Copying data from host to device...\n"); hipMemcpy(in_d, in_h, num_elements * sizeof(float), hipMemcpyHostToDevice); hipDeviceSynchronize(); // Kernel Launch printf("Launching kernel...\n"); atmem_bench(in_d, num_elements, thread_block_size, memory_block_size, mode, cache_warmup_en); // D to H printf("Copying data from device to host...\n"); hipMemcpy(in_h, in_d, num_elements * sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Print output #if IS_ARRAY_PRINT_ENABLED == 1 printf("Output: "); for (int i = 0; i<num_elements; i++) { printf("%f ", in_h[i]); } printf("\n"); #endif // Free memory hipFree(in_d); free(in_h); return 0; }
c7ed4225f978faec5f4626be3a03d36cda53e08a.cu
#include <stdio.h> #include <stdint.h> #include "kernel.cu" int main(int argc, char* argv[]) { float *in_h; float *in_d; unsigned int num_elements, memory_block_size, thread_block_size; int mode, cache_warmup_en; num_elements = 1024 * 1024; memory_block_size = 64; thread_block_size = 512; mode = 0; cache_warmup_en = 0; if (argc == 6) { num_elements = atoi(argv[1]); thread_block_size = atoi(argv[2]); memory_block_size = atoi(argv[3]); mode = atoi(argv[4]); cache_warmup_en = atoi(argv[5]); if (mode < 0 || mode > 7) { printf("ERROR: Mode can only be an integer within [0, 7]!"); exit(0); } } else { printf("\n Invalid input parameters!" "\n Usage: ./atmem_bench [num_elements] [thread_block_size] [memory_block_size] [mode=0..7] [cache_warmup_en=0..1])" "\n"); exit(0); } // Print all parameters printf("Number of elements = %u\nThread Block size = %u\nMemory Block size (Interval in Mode 5) = %u\nCache warmup = %s\nMode = %d (%s)\n", num_elements, thread_block_size, memory_block_size, (cache_warmup_en == 0) ? "DISABLED" : "ENABLED", mode, (mode == 0) ? "BASELINE" : (mode == 1) ? "ATOMIC" : (mode == 2) ? "1 THREAD TO 1 ELEMENT" : (mode == 3) ? "1 WARP TO 1 ELEMENT" : (mode == 4) ? "1 WARP TO 32 ELEMENTS" : (mode == 5) ? "1 WARP TO 32 FAR ELEMENTS" : (mode == 6) ? "1 VECTOR ATOMIC ADD" : (mode == 7) ? "CLOCK64() FUNCTION OVERHEAD" : "ERROR"); // Host array in_h = (float*)malloc(num_elements*sizeof(float)); for (int i = 0; i<num_elements; i++) { in_h[i] = (float)(rand() % 1000) / 100.0; } // Print input #if IS_ARRAY_PRINT_ENABLED == 1 printf("Input: "); for (int i = 0; i<num_elements; i++) { printf("%f ", in_h[i]); } printf("\n"); #endif printf("Allocating device variables...\n"); cudaMalloc((void**)&in_d, num_elements * sizeof(float)); cudaDeviceSynchronize(); // H to D printf("Copying data from host to device...\n"); cudaMemcpy(in_d, in_h, num_elements * sizeof(float), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); // Kernel Launch printf("Launching kernel...\n"); atmem_bench(in_d, num_elements, thread_block_size, memory_block_size, mode, cache_warmup_en); // D to H printf("Copying data from device to host...\n"); cudaMemcpy(in_h, in_d, num_elements * sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // Print output #if IS_ARRAY_PRINT_ENABLED == 1 printf("Output: "); for (int i = 0; i<num_elements; i++) { printf("%f ", in_h[i]); } printf("\n"); #endif // Free memory cudaFree(in_d); free(in_h); return 0; }
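// Illustrative sketch, not taken from the pair above: the driver calls an
// atmem_bench() helper from "kernel.cu" that is not included in this record, so
// this is only a guess at the flavor of measurement involved -- timing a heavily
// contended float atomicAdd with clock64() from inside the kernel. All names
// here (timed_atomic, d_target, d_cycles) are local to the sketch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void timed_atomic(float* target, long long* cycles, int iters) {
    long long start = clock64();
    for (int i = 0; i < iters; ++i)
        atomicAdd(target, 1.0f);                 // every thread hammers one address
    long long stop = clock64();
    if (threadIdx.x == 0 && blockIdx.x == 0)
        *cycles = (stop - start) / iters;        // rough cycles per atomic, thread 0's view
}

int main() {
    float* d_target = nullptr;
    long long* d_cycles = nullptr;
    cudaMalloc(&d_target, sizeof(float));
    cudaMalloc(&d_cycles, sizeof(long long));
    cudaMemset(d_target, 0, sizeof(float));

    timed_atomic<<<64, 256>>>(d_target, d_cycles, 100);
    cudaDeviceSynchronize();

    long long cycles = 0;
    cudaMemcpy(&cycles, d_cycles, sizeof(long long), cudaMemcpyDeviceToHost);
    printf("~%lld cycles per contended atomicAdd\n", cycles);

    cudaFree(d_target);
    cudaFree(d_cycles);
    return 0;
}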
0d6a75944e42c00c955b18843839212d9c1b410d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./kern.cuh" #include "kern_helper_hip.cuh" const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4; using namespace megdnn; using namespace cuda; using namespace convolution3d; using namespace chanwise; namespace { template <typename T, uint32_t nr_thpf> __global__ void kern_bwd_filter( T* flt_grad, const T* src, const T* dst_grad, Param param) { const uint32_t N = param.batch, IC = param.src_chl, ID = param.src_d, IH = param.src_h, IW = param.src_w, CHL_MUL = param.chl_mul, FD = param.flt_d, FH = param.flt_h, FW = param.flt_w, PD = param.pad_d, PH = param.pad_h, PW = param.pad_w, SD = param.stride_d, SH = param.stride_h, SW = param.stride_w, OD = param.out_d, OH = param.out_h, OW = param.out_w, SRC_BATCH_STRIDE = IC * ID * IH * IW, DST_BATCH_STRIDE = IC * CHL_MUL * OD * OH * OW, BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf, OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X; uint32_t ic, chl_mul, fd, fh, fw; { uint32_t i = OUT_IDX; i = div_mod(i, FW, fw); i = div_mod(i, FH, fh); i = div_mod(i, FD, fd); i = div_mod(i, CHL_MUL, chl_mul); ic = i; } if (ic >= IC) { return; } src += ic * ID * IH * IW; dst_grad += (ic * CHL_MUL + chl_mul) * OD * OH * OW; const uint32_t od_lo = max(int32_t(PD - fd + SD - 1), 0) / SD, od_hi = min((ID - 1 + PD - fd) / SD + 1, OD), oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH, oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH), ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW, ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_d = od_hi - od_lo, oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo, oblk_tot = oblk_d * oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL), tid = threadIdx.x % nr_thpf; if (ID + PD < fd + 1 || od_lo >= od_hi || IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) { if (!tid) flt_grad[OUT_IDX] = 0; return; } T sum(0); for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) { uint32_t n, oh, ow, od; n = div_mod(div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh), oblk_d, od) * BATCH_UNROLL; od += od_lo; oh += oh_lo; ow += ow_lo; uint32_t id = od * SD - PD + fd, ih = oh * SH - PH + fh, iw = ow * SW - PW + fw, soff = id * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE, doff = od * OH * OW + oh * OW + ow + n * DST_BATCH_STRIDE; #pragma unroll for (uint32_t i = 0; i < BATCH_UNROLL; ++i) { if (!i || n + i < N) { sum += src[soff] * dst_grad[doff]; } soff += SRC_BATCH_STRIDE; doff += DST_BATCH_STRIDE; } } if (nr_thpf == 1) { flt_grad[OUT_IDX] = sum; } else { // reduce all sums in a block extern __shared__ uint8_t shared_storage[]; volatile T* thread_sum = reinterpret_cast<T*>(shared_storage); thread_sum += THREADID_X * nr_thpf; thread_sum[tid] = sum; #pragma unroll for (uint32_t i = nr_thpf / 2; i; i >>= 1) { bool cond = nr_thpf >= i * 2 && tid < i; if (i >= WARP_SIZE) { __syncthreads(); } T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i]; thread_sum[tid] = cond ? 
v1 : v0; } if (!tid) flt_grad[OUT_IDX] = thread_sum[0]; } } } // anonymous namespace template <typename T> void convolution3d::chanwise::run_bwd_filter( T* filter_grad, const T* src, const T* dst_grad, const Param& param, hipStream_t stream) { void (*kern)(T*, const T*, const T*, Param) = NULL; uint32_t nr_thread = query_blocksize_for_kernel(kern_bwd_filter<T, 1024>), nr_thpf = ::min( nr_thread, std::max<uint32_t>( 1, param.out_d * param.out_h * param.out_w * param.batch / (BATCH_UNROLL * 16))); // find nearest power-of-2 of nr_thpf do { #define CK(_n) \ if (nr_thpf >= _n) { \ kern = kern_bwd_filter<T, _n>; \ nr_thpf = _n; \ break; \ } CK(1 << 10); CK(1 << 9); CK(1 << 8); CK(1 << 7); CK(1 << 6); CK(1 << 5); CK(1 << 4); CK(1 << 3); CK(1 << 2); CK(1 << 1); CK(1 << 0); #undef CK } while (0); megdnn_assert(kern); nr_thread = query_blocksize_for_kernel(kern); uint32_t nr_flt_per_blk = nr_thread / nr_thpf; while (nr_flt_per_blk * nr_thpf % WARP_SIZE) --nr_flt_per_blk; megdnn_assert(nr_flt_per_blk); int nr_block = DIVUP( param.flt_d * param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk); nr_thread = nr_flt_per_blk * nr_thpf; uint32_t shared = nr_thread * 2 * sizeof(T); hipLaunchKernelGGL(( kern), dim3(nr_block), dim3(nr_thread), shared, stream, filter_grad, src, dst_grad, param); after_kernel_launch(); } namespace megdnn { namespace cuda { namespace convolution3d { namespace chanwise { #define DO_INST(_ct) \ template void run_bwd_filter( \ _ct*, const _ct*, const _ct*, const Param&, hipStream_t); #define INST(_dt) DO_INST(DTypeTrait<_dt>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(INST) #undef INST #undef DO_INST } // namespace chanwise } // namespace convolution3d } // namespace cuda } // namespace megdnn // vim: syntax=cuda.doxygen
0d6a75944e42c00c955b18843839212d9c1b410d.cu
#include "./kern.cuh" #include "./kern_helper.cuh" const uint32_t WARP_SIZE = 32, BATCH_UNROLL = 4; using namespace megdnn; using namespace cuda; using namespace convolution3d; using namespace chanwise; namespace { template <typename T, uint32_t nr_thpf> __global__ void kern_bwd_filter( T* flt_grad, const T* src, const T* dst_grad, Param param) { const uint32_t N = param.batch, IC = param.src_chl, ID = param.src_d, IH = param.src_h, IW = param.src_w, CHL_MUL = param.chl_mul, FD = param.flt_d, FH = param.flt_h, FW = param.flt_w, PD = param.pad_d, PH = param.pad_h, PW = param.pad_w, SD = param.stride_d, SH = param.stride_h, SW = param.stride_w, OD = param.out_d, OH = param.out_h, OW = param.out_w, SRC_BATCH_STRIDE = IC * ID * IH * IW, DST_BATCH_STRIDE = IC * CHL_MUL * OD * OH * OW, BLKDIM_X = blockDim.x / nr_thpf, THREADID_X = threadIdx.x / nr_thpf, OUT_IDX = blockIdx.x * BLKDIM_X + THREADID_X; uint32_t ic, chl_mul, fd, fh, fw; { uint32_t i = OUT_IDX; i = div_mod(i, FW, fw); i = div_mod(i, FH, fh); i = div_mod(i, FD, fd); i = div_mod(i, CHL_MUL, chl_mul); ic = i; } if (ic >= IC) { return; } src += ic * ID * IH * IW; dst_grad += (ic * CHL_MUL + chl_mul) * OD * OH * OW; const uint32_t od_lo = max(int32_t(PD - fd + SD - 1), 0) / SD, od_hi = min((ID - 1 + PD - fd) / SD + 1, OD), oh_lo = max(int32_t(PH - fh + SH - 1), 0) / SH, oh_hi = min((IH - 1 + PH - fh) / SH + 1, OH), ow_lo = max(int32_t(PW - fw + SW - 1), 0) / SW, ow_hi = min((IW - 1 + PW - fw) / SW + 1, OW), oblk_d = od_hi - od_lo, oblk_h = oh_hi - oh_lo, oblk_w = ow_hi - ow_lo, oblk_tot = oblk_d * oblk_h * oblk_w * ((N + BATCH_UNROLL - 1) / BATCH_UNROLL), tid = threadIdx.x % nr_thpf; if (ID + PD < fd + 1 || od_lo >= od_hi || IH + PH < fh + 1 || oh_lo >= oh_hi || IW + PW < fw + 1 || ow_lo >= ow_hi) { if (!tid) flt_grad[OUT_IDX] = 0; return; } T sum(0); for (uint32_t oblk_idx = tid; oblk_idx < oblk_tot; oblk_idx += nr_thpf) { uint32_t n, oh, ow, od; n = div_mod(div_mod(div_mod(oblk_idx, oblk_w, ow), oblk_h, oh), oblk_d, od) * BATCH_UNROLL; od += od_lo; oh += oh_lo; ow += ow_lo; uint32_t id = od * SD - PD + fd, ih = oh * SH - PH + fh, iw = ow * SW - PW + fw, soff = id * IH * IW + ih * IW + iw + n * SRC_BATCH_STRIDE, doff = od * OH * OW + oh * OW + ow + n * DST_BATCH_STRIDE; #pragma unroll for (uint32_t i = 0; i < BATCH_UNROLL; ++i) { if (!i || n + i < N) { sum += src[soff] * dst_grad[doff]; } soff += SRC_BATCH_STRIDE; doff += DST_BATCH_STRIDE; } } if (nr_thpf == 1) { flt_grad[OUT_IDX] = sum; } else { // reduce all sums in a block extern __shared__ uint8_t shared_storage[]; volatile T* thread_sum = reinterpret_cast<T*>(shared_storage); thread_sum += THREADID_X * nr_thpf; thread_sum[tid] = sum; #pragma unroll for (uint32_t i = nr_thpf / 2; i; i >>= 1) { bool cond = nr_thpf >= i * 2 && tid < i; if (i >= WARP_SIZE) { __syncthreads(); } T v0 = thread_sum[tid], v1 = v0 + thread_sum[tid + i]; thread_sum[tid] = cond ? 
v1 : v0; } if (!tid) flt_grad[OUT_IDX] = thread_sum[0]; } } } // anonymous namespace template <typename T> void convolution3d::chanwise::run_bwd_filter( T* filter_grad, const T* src, const T* dst_grad, const Param& param, cudaStream_t stream) { void (*kern)(T*, const T*, const T*, Param) = NULL; uint32_t nr_thread = query_blocksize_for_kernel(kern_bwd_filter<T, 1024>), nr_thpf = std::min( nr_thread, std::max<uint32_t>( 1, param.out_d * param.out_h * param.out_w * param.batch / (BATCH_UNROLL * 16))); // find nearest power-of-2 of nr_thpf do { #define CK(_n) \ if (nr_thpf >= _n) { \ kern = kern_bwd_filter<T, _n>; \ nr_thpf = _n; \ break; \ } CK(1 << 10); CK(1 << 9); CK(1 << 8); CK(1 << 7); CK(1 << 6); CK(1 << 5); CK(1 << 4); CK(1 << 3); CK(1 << 2); CK(1 << 1); CK(1 << 0); #undef CK } while (0); megdnn_assert(kern); nr_thread = query_blocksize_for_kernel(kern); uint32_t nr_flt_per_blk = nr_thread / nr_thpf; while (nr_flt_per_blk * nr_thpf % WARP_SIZE) --nr_flt_per_blk; megdnn_assert(nr_flt_per_blk); int nr_block = DIVUP( param.flt_d * param.flt_h * param.flt_w * param.src_chl * param.chl_mul, nr_flt_per_blk); nr_thread = nr_flt_per_blk * nr_thpf; uint32_t shared = nr_thread * 2 * sizeof(T); kern<<<nr_block, nr_thread, shared, stream>>>(filter_grad, src, dst_grad, param); after_kernel_launch(); } namespace megdnn { namespace cuda { namespace convolution3d { namespace chanwise { #define DO_INST(_ct) \ template void run_bwd_filter( \ _ct*, const _ct*, const _ct*, const Param&, cudaStream_t); #define INST(_dt) DO_INST(DTypeTrait<_dt>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(INST) #undef INST #undef DO_INST } // namespace chanwise } // namespace convolution3d } // namespace cuda } // namespace megdnn // vim: syntax=cuda.doxygen
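// Illustrative sketch, not taken from the pair above: the filter-gradient kernel
// finishes with a shared-memory tree reduction over nr_thpf partial sums per
// output element. This is that pattern in isolation -- one float per thread,
// summed within a block -- written against the plain CUDA runtime with names
// local to the sketch. The original also exploits warp-synchronous execution
// (volatile shared memory and skipping __syncthreads below warp size); this
// version keeps the barrier on every step for simplicity.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void block_sum(const float* in, float* out, int n) {
    extern __shared__ float partial[];
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;
    partial[tid] = (idx < n) ? in[idx] : 0.0f;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) partial[tid] += partial[tid + stride];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = partial[0];
}

int main() {
    const int n = 1 << 16, threads = 256, blocks = n / threads;
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, blocks * sizeof(float));

    // Fill with ones so every block should sum to exactly `threads`.
    float* ones = new float[n];
    for (int i = 0; i < n; ++i) ones[i] = 1.0f;
    cudaMemcpy(d_in, ones, n * sizeof(float), cudaMemcpyHostToDevice);
    delete[] ones;

    block_sum<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_out, n);

    float first = 0.0f;
    cudaMemcpy(&first, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("block 0 sum = %f (expected %d)\n", first, threads);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}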
b4117272d3c0a696afe7dd2eb354c08fb2a913cf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <unistd.h> #include "utils.h" #define THREADS 512 #define MAX_MSG_SIZE 16 * 1024 #define UNROLL 8 __global__ void ping_pong(volatile int *data_d, volatile int *flag_d, int len, int pe, int iter, int skip) { long long int start, stop; double usec, time; int i, j, tid, peer; peer = !pe; tid = threadIdx.x; for (i = 0; i < (iter + skip); i++) { if (i == skip) start = clock64(); if (pe) { if (!tid) { nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1)); } __syncthreads(); for (j = tid; j < len; j += THREADS) { nvshmem_int_p((int *)data_d + j, *(data_d + j), peer); } __syncthreads(); if (!tid) { nvshmem_fence(); nvshmem_int_p((int *)flag_d, (i + 1), peer); } __syncthreads(); } else { for (j = tid; j < len; j += THREADS) { nvshmem_int_p((int *)data_d + j, *(data_d + j), peer); } __syncthreads(); if (!tid) { nvshmem_fence(); nvshmem_int_p((int *)flag_d, (i + 1), peer); } __syncthreads(); if (!tid) { nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1)); } __syncthreads(); } } stop = clock64(); if(!tid) nvshmem_quiet(); if ((pe == 0) && !tid) { time = (stop - start) / iter; usec = time * 1000 / clockrate; printf("%7lu \t %8.2f \n", len * sizeof(int), usec); } } int main(int c, char *v[]) { int mype, npes, size; int *flag_d = NULL, *data_d = NULL; sleep(10); int iter = 500; int skip = 50; int max_msg_size = MAX_MSG_SIZE; init_wrapper(&c, &v); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); if (npes != 2) { fprintf(stderr, "This test requires exactly two processes \n"); goto finalize; } data_d = (int *)nvshmem_malloc(max_msg_size); flag_d = (int *)nvshmem_malloc(sizeof(int)); CUDA_CHECK(hipMemset(data_d, 0, max_msg_size)); nvshmem_barrier_all(); CUDA_CHECK(hipDeviceSynchronize()); if (mype == 0) { printf("Note: This test measures full round-trip latency\n"); printf(" size(bytes) \t latency(us)\n"); fflush(stdout); } for (size = sizeof(int); size <= max_msg_size; size *= 2) { int nelems, status = 0; nelems = size / sizeof(int); void *args[6] = {&data_d, &flag_d, &nelems, &mype, &iter, &skip}; CUDA_CHECK(hipMemset(flag_d, 0, sizeof(int))); CUDA_CHECK(hipDeviceSynchronize()); nvshmem_barrier_all(); status = nvshmemx_collective_launch((const void *)ping_pong, 1, THREADS, args, 0, 0); if (status != NVSHMEMX_SUCCESS) { printf("shmemx_collective_launch failed %d \n", status); exit(-1); } CUDA_CHECK(hipDeviceSynchronize()); nvshmem_barrier_all(); } finalize: if (data_d) nvshmem_free(data_d); if (flag_d) nvshmem_free(flag_d); finalize_wrapper(); return 0; }
b4117272d3c0a696afe7dd2eb354c08fb2a913cf.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * * See COPYRIGHT.txt for license information */ #include <stdio.h> #include <assert.h> #include <cuda.h> #include <cuda_runtime.h> #include <unistd.h> #include "utils.h" #define THREADS 512 #define MAX_MSG_SIZE 16 * 1024 #define UNROLL 8 __global__ void ping_pong(volatile int *data_d, volatile int *flag_d, int len, int pe, int iter, int skip) { long long int start, stop; double usec, time; int i, j, tid, peer; peer = !pe; tid = threadIdx.x; for (i = 0; i < (iter + skip); i++) { if (i == skip) start = clock64(); if (pe) { if (!tid) { nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1)); } __syncthreads(); for (j = tid; j < len; j += THREADS) { nvshmem_int_p((int *)data_d + j, *(data_d + j), peer); } __syncthreads(); if (!tid) { nvshmem_fence(); nvshmem_int_p((int *)flag_d, (i + 1), peer); } __syncthreads(); } else { for (j = tid; j < len; j += THREADS) { nvshmem_int_p((int *)data_d + j, *(data_d + j), peer); } __syncthreads(); if (!tid) { nvshmem_fence(); nvshmem_int_p((int *)flag_d, (i + 1), peer); } __syncthreads(); if (!tid) { nvshmem_int_wait_until((int *)flag_d, NVSHMEM_CMP_EQ, (i + 1)); } __syncthreads(); } } stop = clock64(); if(!tid) nvshmem_quiet(); if ((pe == 0) && !tid) { time = (stop - start) / iter; usec = time * 1000 / clockrate; printf("%7lu \t %8.2f \n", len * sizeof(int), usec); } } int main(int c, char *v[]) { int mype, npes, size; int *flag_d = NULL, *data_d = NULL; sleep(10); int iter = 500; int skip = 50; int max_msg_size = MAX_MSG_SIZE; init_wrapper(&c, &v); mype = nvshmem_my_pe(); npes = nvshmem_n_pes(); if (npes != 2) { fprintf(stderr, "This test requires exactly two processes \n"); goto finalize; } data_d = (int *)nvshmem_malloc(max_msg_size); flag_d = (int *)nvshmem_malloc(sizeof(int)); CUDA_CHECK(cudaMemset(data_d, 0, max_msg_size)); nvshmem_barrier_all(); CUDA_CHECK(cudaDeviceSynchronize()); if (mype == 0) { printf("Note: This test measures full round-trip latency\n"); printf(" size(bytes) \t latency(us)\n"); fflush(stdout); } for (size = sizeof(int); size <= max_msg_size; size *= 2) { int nelems, status = 0; nelems = size / sizeof(int); void *args[6] = {&data_d, &flag_d, &nelems, &mype, &iter, &skip}; CUDA_CHECK(cudaMemset(flag_d, 0, sizeof(int))); CUDA_CHECK(cudaDeviceSynchronize()); nvshmem_barrier_all(); status = nvshmemx_collective_launch((const void *)ping_pong, 1, THREADS, args, 0, 0); if (status != NVSHMEMX_SUCCESS) { printf("shmemx_collective_launch failed %d \n", status); exit(-1); } CUDA_CHECK(cudaDeviceSynchronize()); nvshmem_barrier_all(); } finalize: if (data_d) nvshmem_free(data_d); if (flag_d) nvshmem_free(flag_d); finalize_wrapper(); return 0; }
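// Illustrative sketch, not taken from the pair above: the ping-pong kernel turns
// clock64() cycle counts into microseconds via a `clockrate` symbol that comes
// from utils.h and is not shown in this record. One plausible way to obtain and
// use such a constant: cudaDeviceProp::clockRate reports the SM clock in kHz, so
// microseconds = cycles * 1000 / clockRate, matching the formula in the kernel.
// Everything here is local to the sketch and uses the plain CUDA runtime only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void spin(long long* out, int iters) {
    long long start = clock64();
    long long acc = 0;
    for (int i = 0; i < iters; ++i)
        acc += clock64() & 1;        // clock64() reads can't be optimized away
    out[0] = clock64() - start;
    out[1] = acc;                    // keep acc observable
}

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    double clockrate = (double)prop.clockRate;   // kHz

    long long* d_out = nullptr;
    cudaMalloc(&d_out, 2 * sizeof(long long));
    spin<<<1, 1>>>(d_out, 100000);
    cudaDeviceSynchronize();

    long long cycles = 0;
    cudaMemcpy(&cycles, d_out, sizeof(long long), cudaMemcpyDeviceToHost);
    printf("%lld cycles ~= %.2f us at %.0f kHz\n",
           cycles, cycles * 1000.0 / clockrate, clockrate);

    cudaFree(d_out);
    return 0;
}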
cb559f2136fba3391f60e44183f5453ff8ae652c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef M_PI #define M_PI 3.141592653589793F /* pi */ #endif #ifndef M_EPS #define M_EPS 1.01e-3F /* epsilon */ #endif #include "svd3_cuda.h" struct point3d { float x=0, y=0, z=0; }; // database: B*N*3, (x,y,z) // query: B*M*3, (x,y,z) // nnIndex: B*M*K // nnCount: B*M // nnDist: B*M*K // filtIndex: B*M*K __global__ void build_spherical_kernel(const int B, const int N, const int M, const int K, const int n, const int p, const int q, const float radius, const float* database, const float* query, const int* nnIndex, const int* nnCount, const float* nnDist, int* filtIndex) { // get the neighbor indices point3d ptQuery, pt, delta; for(int i=blockIdx.x;i<B;i+=gridDim.x) //for batch { for(int j=threadIdx.x;j<M;j+=blockDim.x) //for query point { int qf = i*M*3+j*3;//query ponit offset of query ptQuery.x = query[qf];//query shape is B*M*3 ptQuery.y = query[qf+1]; ptQuery.z = query[qf+2]; int nnSize = nnCount[i*M+j]; int bf = i*N*3; //batch offset of database int bfi = i*M*K+j*K;//batch and point offset of nnIndex bool transbool = true; // transbool = true; float trans[9] = {0}; if(transbool) { float mean[3] = {0,0,0}; //mean(x) // for(int k=0;k<nnSize;k++) // { // int ptID = nnIndex[bfi+k]; // input point ID // mean[0] += database[bf+ptID*3]; // mean[1] += database[bf+ptID*3+1]; // mean[2] += database[bf+ptID*3+2]; // } // for(int i=0;i<3;i++) // mean[i] = mean[i]/nnSize; //cov = (x-mean)'*(x-mean)/nnSize float cov[9]={0}; for(int i = 0;i<3;i++) { for(int j = i; j <3;j++) { for(int k = 0;k < nnSize;k++) //database[3*k:3*k+2] a point { int ptID = nnIndex[bfi+k]; // cov[i*3+j] += (database[bf+ptID*3+i]-mean[i])*(database[bf+ptID*3+j]-mean[j]); cov[i*3+j] += (database[bf+ptID*3+i]-query[qf+i])*(database[bf+ptID*3+j]-query[qf+j]); } cov[i+j*3] = cov[i*3+j]; } } for(int i =0;i<9;i++) cov[i] = cov[i]/nnSize; //find max dist point int Id = 0; float maxdis = 0; for(int k=0;k<nnSize;k++) { if(nnDist[bfi+k]>maxdis) { maxdis = nnDist[bfi+k]; Id = k; } } Id = nnIndex[bfi+Id]; //calc Rotate Mat calcRotateMat(trans,&database[bf+Id*3],&query[qf],cov); } //find bins with trans for(int k=0;k<nnSize;k++) { int ptID = nnIndex[bfi+k]; // input point ID? 
float deltaxyz[3] = {0}; pt.x = database[bf+ptID*3]; // the neighbor points pt.y = database[bf+ptID*3+1]; pt.z = database[bf+ptID*3+2]; delta.x = pt.x - ptQuery.x; delta.y = pt.y - ptQuery.y; delta.z = pt.z - ptQuery.z; if(transbool){ //delta = delta*trans for(int r = 0;r<3;r++) deltaxyz[r] = delta.x*trans[r]+delta.y*trans[r+3]+delta.z*trans[r+6]; delta.z = deltaxyz[0]; delta.y = deltaxyz[1]; delta.x = deltaxyz[2]; } //find bins float dist = nnDist[bfi+k]; float dist2D = delta.x*delta.x + delta.y*delta.y; dist2D = sqrtf(dist2D); filtIndex[bfi+k] = 0; if (dist>M_EPS && fabs(dist-M_EPS)>1e-6) // update the bin index { float theta = atan2f(delta.y, delta.x); float phi = atan2f(delta.z, dist2D); theta = theta<M_PI?theta:(-M_PI); theta = theta>(-M_PI)?theta:(-M_PI); theta += M_PI; phi = phi<(M_PI/2)?phi:(M_PI/2); phi = phi>(-M_PI/2)?phi:(-M_PI/2); phi += M_PI/2; float alpha = theta*n/2/M_PI; float beta = phi*p/M_PI; float gamma = dist*q/(radius+1e-6F); int nID = min(n-1, int(alpha)); int pID = min(p-1, int(beta)); int qID = min(q-1, int(gamma)); filtIndex[bfi+k] = qID*p*n + pID*n + nID + 1; } } } } } void sphericalKernelLauncher(int B, int N, int M, int K, int n, int p, int q, float radius, const float* database, const float* query, const int* nnIndex, const int* nnCount, const float* nnDist, int* filtIndex) { hipLaunchKernelGGL(( build_spherical_kernel), dim3(32),dim3(1024), 0, 0, B, N, M, K, n, p, q, radius, database, query, nnIndex, nnCount, nnDist, filtIndex); }
cb559f2136fba3391f60e44183f5453ff8ae652c.cu
#ifndef M_PI #define M_PI 3.141592653589793F /* pi */ #endif #ifndef M_EPS #define M_EPS 1.01e-3F /* epsilon */ #endif #include "svd3_cuda.h" struct point3d { float x=0, y=0, z=0; }; // database: B*N*3, (x,y,z) // query: B*M*3, (x,y,z) // nnIndex: B*M*K // nnCount: B*M // nnDist: B*M*K // filtIndex: B*M*K __global__ void build_spherical_kernel(const int B, const int N, const int M, const int K, const int n, const int p, const int q, const float radius, const float* database, const float* query, const int* nnIndex, const int* nnCount, const float* nnDist, int* filtIndex) { // get the neighbor indices point3d ptQuery, pt, delta; for(int i=blockIdx.x;i<B;i+=gridDim.x) //for batch { for(int j=threadIdx.x;j<M;j+=blockDim.x) //for query point { int qf = i*M*3+j*3;//query ponit offset of query ptQuery.x = query[qf];//query shape is B*M*3 ptQuery.y = query[qf+1]; ptQuery.z = query[qf+2]; int nnSize = nnCount[i*M+j]; int bf = i*N*3; //batch offset of database int bfi = i*M*K+j*K;//batch and point offset of nnIndex bool transbool = true; // transbool = true; float trans[9] = {0}; if(transbool) { float mean[3] = {0,0,0}; //mean(x) // for(int k=0;k<nnSize;k++) // { // int ptID = nnIndex[bfi+k]; // input point ID // mean[0] += database[bf+ptID*3]; // mean[1] += database[bf+ptID*3+1]; // mean[2] += database[bf+ptID*3+2]; // } // for(int i=0;i<3;i++) // mean[i] = mean[i]/nnSize; //cov = (x-mean)'*(x-mean)/nnSize float cov[9]={0}; for(int i = 0;i<3;i++) { for(int j = i; j <3;j++) { for(int k = 0;k < nnSize;k++) //database[3*k:3*k+2] a point { int ptID = nnIndex[bfi+k]; // cov[i*3+j] += (database[bf+ptID*3+i]-mean[i])*(database[bf+ptID*3+j]-mean[j]); cov[i*3+j] += (database[bf+ptID*3+i]-query[qf+i])*(database[bf+ptID*3+j]-query[qf+j]); } cov[i+j*3] = cov[i*3+j]; } } for(int i =0;i<9;i++) cov[i] = cov[i]/nnSize; //find max dist point int Id = 0; float maxdis = 0; for(int k=0;k<nnSize;k++) { if(nnDist[bfi+k]>maxdis) { maxdis = nnDist[bfi+k]; Id = k; } } Id = nnIndex[bfi+Id]; //calc Rotate Mat calcRotateMat(trans,&database[bf+Id*3],&query[qf],cov); } //find bins with trans for(int k=0;k<nnSize;k++) { int ptID = nnIndex[bfi+k]; // input point ID? 
float deltaxyz[3] = {0}; pt.x = database[bf+ptID*3]; // the neighbor points pt.y = database[bf+ptID*3+1]; pt.z = database[bf+ptID*3+2]; delta.x = pt.x - ptQuery.x; delta.y = pt.y - ptQuery.y; delta.z = pt.z - ptQuery.z; if(transbool){ //delta = delta*trans for(int r = 0;r<3;r++) deltaxyz[r] = delta.x*trans[r]+delta.y*trans[r+3]+delta.z*trans[r+6]; delta.z = deltaxyz[0]; delta.y = deltaxyz[1]; delta.x = deltaxyz[2]; } //find bins float dist = nnDist[bfi+k]; float dist2D = delta.x*delta.x + delta.y*delta.y; dist2D = sqrtf(dist2D); filtIndex[bfi+k] = 0; if (dist>M_EPS && fabs(dist-M_EPS)>1e-6) // update the bin index { float theta = atan2f(delta.y, delta.x); float phi = atan2f(delta.z, dist2D); theta = theta<M_PI?theta:(-M_PI); theta = theta>(-M_PI)?theta:(-M_PI); theta += M_PI; phi = phi<(M_PI/2)?phi:(M_PI/2); phi = phi>(-M_PI/2)?phi:(-M_PI/2); phi += M_PI/2; float alpha = theta*n/2/M_PI; float beta = phi*p/M_PI; float gamma = dist*q/(radius+1e-6F); int nID = min(n-1, int(alpha)); int pID = min(p-1, int(beta)); int qID = min(q-1, int(gamma)); filtIndex[bfi+k] = qID*p*n + pID*n + nID + 1; } } } } } void sphericalKernelLauncher(int B, int N, int M, int K, int n, int p, int q, float radius, const float* database, const float* query, const int* nnIndex, const int* nnCount, const float* nnDist, int* filtIndex) { build_spherical_kernel<<<32,1024>>>(B, N, M, K, n, p, q, radius, database, query, nnIndex, nnCount, nnDist, filtIndex); }
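// Illustrative sketch, not taken from the pair above: the kernel maps each
// neighbor offset (dx, dy, dz) to one of n*p*q + 1 spherical bins, with bin 0
// reserved for near-coincident points. This repeats essentially the same index
// arithmetic on the host as a quick sanity check; spherical_bin and its test
// values are local to the sketch.
#include <cstdio>
#include <cmath>
#include <algorithm>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int spherical_bin(float dx, float dy, float dz,
                  int n, int p, int q, float radius, float eps = 1.01e-3f) {
    float dist = std::sqrt(dx * dx + dy * dy + dz * dz);
    if (dist <= eps) return 0;                                 // "too close" bucket
    float dist2D = std::sqrt(dx * dx + dy * dy);
    float theta = std::atan2(dy, dx) + (float)M_PI;            // azimuth shifted to [0, 2*pi]
    float phi   = std::atan2(dz, dist2D) + (float)M_PI / 2;    // elevation shifted to [0, pi]
    int nID = std::min(n - 1, (int)(theta * n / (2 * (float)M_PI)));
    int pID = std::min(p - 1, (int)(phi * p / (float)M_PI));
    int qID = std::min(q - 1, (int)(dist * q / (radius + 1e-6f)));
    return qID * p * n + pID * n + nID + 1;                    // same flattening as filtIndex
}

int main() {
    // A neighbor straight "above" the query point, halfway out to the radius.
    int bin = spherical_bin(0.f, 0.f, 0.05f, 8, 2, 2, 0.1f);
    printf("bin = %d (expected 13 with these parameters)\n", bin);
    return 0;
}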
bdd8c649dcfa39d708cc3efcbf7d43208c357676.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //! OpenDTAM Variant of Chambolle & Pock denoising //! //! The complicated half of the DTAM algorithm's mapping core, //! but can be used independently to refine depthmaps. //! //! Written by Paul Foster for GSoC 2014 OpenDTAM project. //! High level algorithm described by Richard Newcombe, Steven J. Lovegrove, and Andrew J. Davison. //! "DTAM: Dense tracking and mapping in real-time." //! Which was in turn based on Chambolle & Pock's //! "A first-order primal-dual algorithm for convex problems with applications to imaging." 
#include <opencv2/core/cuda/common.hpp>//for cudaSafeCall,CV_Assert #include "DepthmapDenoiseWeightedHuber.cuh" namespace cv { namespace cuda { namespace device { namespace dtam_denoise{ static unsigned int arows;//TODO:make sure this is still reentrant void loadConstants(uint h_rows, uint, uint , uint , float* , float* , float* , float* , float* , float*) { arows=h_rows; } hipStream_t localStream=0; const int BLOCKX2D=32; const int BLOCKY2D=32; #define GENERATE_CUDA_FUNC2D(funcName,arglist,notypes) \ static __global__ void funcName arglist; \ void funcName##Caller arglist{ \ dim3 dimBlock(BLOCKX2D,BLOCKY2D); \ dim3 dimGrid((acols + dimBlock.x - 1) / dimBlock.x, \ (arows + dimBlock.y - 1) / dimBlock.y); \ funcName<<<dimGrid, dimBlock, 0, localStream>>>notypes; /* notypes carries its own parentheses, so launch with <<<>>> */ \ cudaSafeCall( hipGetLastError() );\ };static __global__ void funcName arglist #define GENERATE_CUDA_FUNC2DROWS(funcName,arglist,notypes) \ static __global__ void funcName arglist; \ void funcName##Caller arglist{ \ dim3 dimBlock(BLOCKX2D,BLOCKY2D); \ dim3 dimGrid(1, \ (arows + dimBlock.y - 1) / dimBlock.y); \ funcName<<<dimGrid, dimBlock, 0, localStream>>>notypes; /* notypes carries its own parentheses, so launch with <<<>>> */ \ cudaSafeCall( hipGetLastError() );\ };static __global__ void funcName arglist static __global__ void computeG1 (float* pp, float* g1p, float* gxp, float* gyp, int cols); static __global__ void computeG2 (float* pp, float* g1p, float* gxp, float* gyp, int cols); void computeGCaller (float* pp, float* g1p, float* gxp, float* gyp, int cols){ // dim3 dimBlock(BLOCKX2D,BLOCKY2D); dim3 dimBlock(BLOCKX2D,4); dim3 dimGrid(1, (arows + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( computeG1), dim3(dimGrid), dim3(dimBlock),0,localStream, pp, g1p, gxp, gyp, cols); hipDeviceSynchronize(); hipLaunchKernelGGL(( computeG2), dim3(dimGrid), dim3(dimBlock),0,localStream, pp, g1p, gxp, gyp, cols); hipDeviceSynchronize(); cudaSafeCall( hipGetLastError() ); }; GENERATE_CUDA_FUNC2DROWS(computeG1, (float* pp, float* g1p, float* gxp, float* gyp, int cols), (pp, g1p, gxp, gyp, cols)) { #if __CUDA_ARCH__>300 //TODO: make compatible with cuda 2.0 and lower (remove shuffles).
Probably through texture fetch //Original pseudocode for this function: // //subscripts u,d,l,r mean up,down,left,right // void computeG(){ // // g0 is the strongest nearby gradient (excluding point defects) // g0x=fabsf(pr-pl);//|dx| // g0y=fabsf(pd-pu);//|dy| // g0=max(g0x,g0y); // // g1 is the scaled g0 through the g function exp(-alpha*x^beta) // g1=sqrt(g0); //beta=0.5 // alpha=3.5; // g1=exp(-alpha*g1); // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change // gx=max(g1r,g1); // gy=max(g1d,g1); // gu=gyu; //upper spring is the lower spring of the pixel above // gd=gy; //lower spring // gr=gx; //right spring // gl=gxl; //left spring is the right spring of the pixel to the left // } const float alpha=3.5f; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int upoff=-(y!=0)*cols; int dnoff=(y<gridDim.y*blockDim.y-1)*cols; //itr0 int pt=x+y*cols; float ph,pn,pu,pd,pl,pr; float g0x,g0y,g0,g1,gt,gsav; float tmp; ph=pp[pt]; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); if(threadIdx.x>=30){ pr=tmp; } pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx no prior val gsav=__shfl_down(gt,31);//x000000 for next time g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; ph=pn; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); pr=threadIdx.x>=30?tmp:pr; pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx gsav=__shfl_down(gt,31);//x000000 for next time g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; } //itr n-1 pt=x+y*cols; ph=pn; pr=__shfl_down(ph,2); pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; #endif } GENERATE_CUDA_FUNC2DROWS(computeG2, (float* pp, float* g1p, float* gxp, float* gyp, int cols), (pp, g1p, gxp, gyp, cols)) { #if __CUDA_ARCH__>300 int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int dnoff=(y<gridDim.y*blockDim.y-1)*cols; //itr0 int pt=x+y*cols; float g1h,g1n,g1u,g1d,g1r,g1l,gx,gy; float tmp; //part2, find gx,gy x = threadIdx.x; y = blockIdx.y * blockDim.y + threadIdx.y; //itr0 pt=x+y*cols; g1h=g1p[pt]; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); if(threadIdx.x>=31){ g1r=tmp; } g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; g1h=g1n; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); g1r=threadIdx.x>=31?tmp:g1r; g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; } //itr n-1 pt=x+y*cols; g1h=g1n; g1r=__shfl_down(g1h,1); g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; #endif } //This version is faster, but makes synchronization 
errors at the lines between parts 1 and 2. //Could be fixed by a second pass for part 2 over the stitch lines, but I don't have time to figure that out //right now. GENERATE_CUDA_FUNC2DROWS(computeGunsafe, (float* pp, float* g1p, float* gxp, float* gyp, int cols), (pp, g1p, gxp, gyp, cols)) { #if __CUDA_ARCH__>300 //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch //TODO: rerun kernel on lines with y%32==31 or y%32==0 to fix stitch lines //Original pseudocode for this function: // //subscripts u,d,l,r mean up,down,left,right // void computeG(){ // // g0 is the strongest nearby gradient (excluding point defects) // g0x=fabsf(pr-pl);//|dx| // g0y=fabsf(pd-pu);//|dy| // g0=max(g0x,g0y); // // g1 is the scaled g0 through the g function exp(-alpha*x^beta) // g1=sqrt(g0); //beta=0.5 // alpha=3.5; // g1=exp(-alpha*g1); // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change // gx=max(g1r,g1); // gy=max(g1d,g1); // gu=gyu; //upper spring is the lower spring of the pixel above // gd=gy; //lower spring // gr=gx; //right spring // gl=gxl; //left spring is the right spring of the pixel to the left // } const float alpha=3.5f; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int upoff=-(y!=0)*cols; int dnoff=(y<gridDim.y*blockDim.y-1)*cols; //itr0 int pt=x+y*cols; float ph,pn,pu,pd,pl,pr; float g0x,g0y,g0,g1,g1h,g1n,g1u,g1d,g1r,g1l,gx,gy,gt,gsav; float tmp; ph=pp[pt]; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); if(threadIdx.x>=30){ pr=tmp; } pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx no prior val gsav=__shfl_down(gt,31);//x000000 for next time g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; ph=pn; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); pr=threadIdx.x>=30?tmp:pr; pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx gsav=__shfl_down(gt,31);//x000000 for next time g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; } //itr n-1 pt=x+y*cols; ph=pn; pr=__shfl_down(ph,2); pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; //part2, find gx,gy x = threadIdx.x; y = blockIdx.y * blockDim.y + threadIdx.y; //itr0 pt=x+y*cols; g1h=g1p[pt]; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); if(threadIdx.x>=31){ g1r=tmp; } g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; g1h=g1n; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); g1r=threadIdx.x>=31?tmp:g1r; g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; } //itr n-1 pt=x+y*cols; g1h=g1n; g1r=__shfl_down(g1h,1); g1l=g1h; g1u=g1h; 
g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; #endif } __device__ inline float saturate(float x){ //return x; return x/fmaxf(1.0f,fabsf(x)); } // static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt, // float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon, // float theta);//DANGER, no interblock synchronization = weird instability static __global__ void updateQ (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta); static __global__ void updateD (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta); void updateQDCaller(float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta) { dim3 dimBlock(BLOCKX2D, BLOCKY2D); dim3 dimGrid(1, (arows + dimBlock.y - 1) / dimBlock.y); CV_Assert(dimGrid.y>0); cudaSafeCall( hipGetLastError() ); hipLaunchKernelGGL(( updateQ), dim3(dimGrid), dim3(dimBlock),0,localStream, gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta); cudaSafeCall( hipGetLastError() ); hipLaunchKernelGGL(( updateD), dim3(dimGrid), dim3(dimBlock),0,localStream, gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta); cudaSafeCall( hipGetLastError() ); }; // static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt, // float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon, // float theta) { // //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch // // //Original pseudocode for this function: // //void updateQD(){ // // //shifts are shuffles! // // for (all x in blocks of warpsize;;){ // // //qx update // // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; // // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // // dr=dh<<1; // // tmp=dn>>31; // // if (rt) // // dr=tmp; // // qxh=gqx/gxh; // // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. 
// // gqxpt[pt]=gqx; // // // // //qy update // // s[bpt]=dn; // // if(!btm){ // // dd=s[bpt+bdnoff]; // // }else{ // // dd=dpt[pt+dnoff]; // // } // // qyh=gqy/gy; // // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); // // gqy=saturate(gyh*qyh); // // gqypt[pt]=gqy; // // // // //dx update // // gqr=gqx; // // gql=gqx>>1; // // if (lf) // // gql=gqsave; // // gqsave=gqx<<31;//save for next iter // // dacc = gqr - gql;//dx part // // // // //dy update and d store // // gqd=gqy; // // s[bpt]=gqy; // // if(!top) // // gqu=s[bpt+bupoff]; // // else // // gqu=gqxpt[pt + upoff]; // // dacc += gqd-gqu; //dy part // // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); // // dpt[pt]=d; // // } // //} // __shared__ float s[32*BLOCKY2D]; // int x = threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // bool rt=x==31; // bool lf=x==0; // bool top=y==0; // bool btm=y==rows-1; // bool btop=threadIdx.y==0; // bool bbtm=threadIdx.y==blockDim.y-1; // int pt, bpt,bdnoff ,dnoff, bupoff, upoff; // // // float tmp,gqsave; // gqsave=0; // bpt = threadIdx.x+threadIdx.y*blockDim.x; // bdnoff=blockDim.x; // dnoff=(!btm)*cols; // bupoff=-blockDim.x; // upoff=-(!top)*cols; // // pt=x+y*cols; // // float dh,dn; // dn=dpt[pt]; // // for(;x<cols;x+=32){ // float qx,gx,gqx,qy,gy,gqy; // pt=x+y*cols; // // // //qx update // { // float dr; // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // // //load // { // dh=dn; // if(x<cols-32){ // dn=dpt[pt+32]; // // } // gqx=gqxpt[pt]; // gx=gxpt[pt]; // // gx=1.0f; // // } // // dr=__shfl_down(dh,1); // tmp=__shfl_up(dn,31); // if (rt && x<cols-32) // dr=tmp; // qx = gqx/gx; // qx = (qx+sigma_q*gx*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // gqx = saturate(gx*qx);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. // //gqxpt[pt]=gqx; // } // dpt[pt] = dh; // //qy update // { // float dd; // //load // { // gqy=gqypt[pt]; // gy=gypt[pt]; // // gy=1.0f; // } // s[bpt]=dh; // __syncthreads(); // if(!bbtm){ // dd=s[bpt+bdnoff]; // }else{ // dd=dpt[pt+dnoff]; // } // qy = gqy/gy; // qy = (qy+sigma_q*gy*(dd-dh))/(1+sigma_q*epsilon); // gqy = saturate(gy*qy); // //gqypt[pt]=gqy; // } // float dacc; // //dx update // { // float gqr,gql; // gqr=gqx; // gql=__shfl_up(gqx,1); // if (lf) // gql=gqsave; // gqsave=__shfl_down(gqx,31);//save for next iter // dacc = gqr - gql;//dx part // } // float d=dh; // //dy update and d store // { // float a; // //load // { // a=apt[pt]; // } // float gqu,gqd; // // gqd=gqy; // s[bpt]=gqy; // __syncthreads(); // if(!btop) // gqu=s[bpt+bupoff]; // else // gqu=gqypt[pt + upoff]; // if(y==0) // gqu=0.0f; // dacc += gqd-gqu; //dy part // d = ( d + sigma_d*(dacc + a/theta) ) / (1 + sigma_d/theta); // //dpt[pt] = d; // } // __syncthreads(); // gqxpt[pt]=gqx; // gqypt[pt]=gqy; // dpt[pt] = d; // __syncthreads(); // } // } GENERATE_CUDA_FUNC2DROWS(updateQ, (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta), ( gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta)) { //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch //Original pseudocode for this function: //void updateQD(){ // //shifts are shuffles! 
// for (all x in blocks of warpsize;;){ // //qx update // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // dr=dh<<1; // tmp=dn>>31; // if (rt) // dr=tmp; // qxh=gqx/gxh; // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. // gqxpt[pt]=gqx; // // //qy update // s[bpt]=dn; // if(!btm){ // dd=s[bpt+bdnoff]; // }else{ // dd=dpt[pt+dnoff]; // } // qyh=gqy/gy; // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); // gqy=saturate(gyh*qyh); // gqypt[pt]=gqy; // // //dx update // gqr=gqx; // gql=gqx>>1; // if (lf) // gql=gqsave; // gqsave=gqx<<31;//save for next iter // dacc = gqr - gql;//dx part // // //dy update and d store // gqd=gqy; // s[bpt]=gqy; // if(!top) // gqu=s[bpt+bupoff]; // else // gqu=gqxpt[pt + upoff]; // dacc += gqd-gqu; //dy part // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); // dpt[pt]=d; // } //} #if __CUDA_ARCH__>300 __shared__ float s[32*BLOCKY2D]; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; bool rt=x==31; bool bbtm=threadIdx.y==blockDim.y-1; int pt, bpt,bdnoff ,dnoff; float tmp; bpt = threadIdx.x+threadIdx.y*blockDim.x; bdnoff=blockDim.x; dnoff=(y<gridDim.y*blockDim.y-1)*cols; pt=x+y*cols; float dh,dn; dn=dpt[pt]; for(;x<cols;x+=32){ float qx,gx,gqx,qy,gy,gqy; pt=x+y*cols; //qx update { float dr; //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) //load { dh=dn; if(x<cols-32){ dn=dpt[pt+32]; } gqx=gqxpt[pt]; gx=gxpt[pt]+.01f; // gx=1.0f; } dr=__shfl_down(dh,1); tmp=__shfl_up(dn,31); if (rt && x<cols-32) dr=tmp; qx = gqx/gx; //qx+=(gx*(dr-dh)-epsilon*qx)*.5f;//simplified step qx = (qx+sigma_q*gx*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) gqx = saturate(gx*qx);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. gqxpt[pt]=gqx; } //qy update { float dd; //load { gqy=gqypt[pt]; gy=gypt[pt]+.01f; // gy=1.0f; } s[bpt]=dh; __syncthreads(); if(!bbtm) dd=s[bpt+bdnoff]; else dd=dpt[pt+dnoff]; __syncthreads(); qy = gqy/gy; //qy+=(gy*(dd-dh)-epsilon*qy)*.5f;//simplified step qy = (qy+sigma_q*gy*(dd-dh))/(1+sigma_q*epsilon); gqy = saturate(gy*qy); gqypt[pt]=gqy; } //__syncthreads(); } #endif } GENERATE_CUDA_FUNC2DROWS(updateD, (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt,int cols, float sigma_q, float sigma_d, float epsilon, float theta), ( gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta)) { #if __CUDA_ARCH__>300 //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch //Original pseudocode for this function: //void updateQD(){ // //shifts are shuffles! // for (all x in blocks of warpsize){ // //qx update // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // dr=dh<<1; // tmp=dn>>31; // if (rt) // dr=tmp; // qxh=gqx/gxh; // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. 
// gqxpt[pt]=gqx; // // //qy update // s[bpt]=dn; // if(!btm){ // dd=s[bpt+bdnoff]; // }else{ // dd=dpt[pt+dnoff]; // } // qyh=gqy/gy; // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); // gqy=saturate(gyh*qyh); // gqypt[pt]=gqy; // // //dx update // gqr=gqx; // gql=gqx>>1; // if (lf) // gql=gqsave; // gqsave=gqx<<31;//save for next iter // dacc = gqr - gql;//dx part // // //dy update and d store // gqd=gqy; // s[bpt]=gqy; // if(!top) // gqu=s[bpt+bupoff]; // else // gqu=gqxpt[pt + upoff]; // dacc += gqd-gqu; //dy part // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); // dpt[pt]=d; // } //} __shared__ float s[32*BLOCKY2D]; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; bool lf=x==0; bool top=y==0; bool btop=threadIdx.y==0; int pt, bpt, bupoff, upoff; float gqsave=0; bpt = threadIdx.x+threadIdx.y*blockDim.x; bupoff=-blockDim.x; upoff=-(!top)*cols; pt=x+y*cols; for(;x<cols;x+=32){ float gqx,gqy; pt=x+y*cols; float dacc; //dx update { float gqr,gql; gqr=gqx=gqxpt[pt]; gql=__shfl_up(gqx,1); if (lf) gql=gqsave; gqsave=__shfl_down(gqx,31);//save for next iter dacc = gqr - gql;//dx part } //dy update and d store { float a; //load { a=apt[pt]; } float gqu,gqd; float d=dpt[pt]; gqd=gqy=gqypt[pt]; s[bpt]=gqy; __syncthreads(); if(!btop) gqu=s[bpt+bupoff]; else gqu=gqypt[pt + upoff]; if(y==0) gqu=0.0f; dacc += gqd-gqu; //dy part //d += dacc*.5f;//simplified step d = ( d + sigma_d*(dacc + a/theta) ) / (1 + sigma_d/theta); dpt[pt] = d; } __syncthreads();//can't figure out why this is needed, but it is to avoid subtle errors in Qy at the ends of the warp } #endif } }}}}
bdd8c649dcfa39d708cc3efcbf7d43208c357676.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //! OpenDTAM Variant of Chambolle & Pock denoising //! //! The complicated half of the DTAM algorithm's mapping core, //! but can be used independently to refine depthmaps. //! //! Written by Paul Foster for GSoC 2014 OpenDTAM project. //! High level algorithm described by Richard Newcombe, Steven J. Lovegrove, and Andrew J. Davison. //! "DTAM: Dense tracking and mapping in real-time." //! Which was in turn based on Chambolle & Pock's //! "A first-order primal-dual algorithm for convex problems with applications to imaging." 
#include <opencv2/core/cuda/common.hpp>//for cudaSafeCall,CV_Assert #include "DepthmapDenoiseWeightedHuber.cuh" namespace cv { namespace cuda { namespace device { namespace dtam_denoise{ static unsigned int arows;//TODO:make sure this is still reentrant void loadConstants(uint h_rows, uint, uint , uint , float* , float* , float* , float* , float* , float*) { arows=h_rows; } cudaStream_t localStream=0; const int BLOCKX2D=32; const int BLOCKY2D=32; #define GENERATE_CUDA_FUNC2D(funcName,arglist,notypes) \ static __global__ void funcName arglist; \ void funcName##Caller arglist{ \ dim3 dimBlock(BLOCKX2D,BLOCKY2D); \ dim3 dimGrid((acols + dimBlock.x - 1) / dimBlock.x, \ (arows + dimBlock.y - 1) / dimBlock.y); \ funcName<<<dimGrid, dimBlock,0,localStream>>>notypes; \ cudaSafeCall( cudaGetLastError() );\ };static __global__ void funcName arglist #define GENERATE_CUDA_FUNC2DROWS(funcName,arglist,notypes) \ static __global__ void funcName arglist; \ void funcName##Caller arglist{ \ dim3 dimBlock(BLOCKX2D,BLOCKY2D); \ dim3 dimGrid(1, \ (arows + dimBlock.y - 1) / dimBlock.y); \ funcName<<<dimGrid, dimBlock,0,localStream>>>notypes; \ cudaSafeCall( cudaGetLastError() );\ };static __global__ void funcName arglist static __global__ void computeG1 (float* pp, float* g1p, float* gxp, float* gyp, int cols); static __global__ void computeG2 (float* pp, float* g1p, float* gxp, float* gyp, int cols); void computeGCaller (float* pp, float* g1p, float* gxp, float* gyp, int cols){ // dim3 dimBlock(BLOCKX2D,BLOCKY2D); dim3 dimBlock(BLOCKX2D,4); dim3 dimGrid(1, (arows + dimBlock.y - 1) / dimBlock.y); computeG1<<<dimGrid, dimBlock,0,localStream>>>(pp, g1p, gxp, gyp, cols); cudaDeviceSynchronize(); computeG2<<<dimGrid, dimBlock,0,localStream>>>(pp, g1p, gxp, gyp, cols); cudaDeviceSynchronize(); cudaSafeCall( cudaGetLastError() ); }; GENERATE_CUDA_FUNC2DROWS(computeG1, (float* pp, float* g1p, float* gxp, float* gyp, int cols), (pp, g1p, gxp, gyp, cols)) { #if __CUDA_ARCH__>300 //TODO: make compatible with cuda 2.0 and lower (remove shuffles). 
Probably through texture fetch //Original pseudocode for this function: // //subscripts u,d,l,r mean up,down,left,right // void computeG(){ // // g0 is the strongest nearby gradient (excluding point defects) // g0x=fabsf(pr-pl);//|dx| // g0y=fabsf(pd-pu);//|dy| // g0=max(g0x,g0y); // // g1 is the scaled g0 through the g function exp(-alpha*x^beta) // g1=sqrt(g0); //beta=0.5 // alpha=3.5; // g1=exp(-alpha*g1); // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change // gx=max(g1r,g1); // gy=max(g1d,g1); // gu=gyu; //upper spring is the lower spring of the pixel above // gd=gy; //lower spring // gr=gx; //right spring // gl=gxl; //left spring is the right spring of the pixel to the left // } const float alpha=3.5f; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int upoff=-(y!=0)*cols; int dnoff=(y<gridDim.y*blockDim.y-1)*cols; //itr0 int pt=x+y*cols; float ph,pn,pu,pd,pl,pr; float g0x,g0y,g0,g1,gt,gsav; float tmp; ph=pp[pt]; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); if(threadIdx.x>=30){ pr=tmp; } pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx no prior val gsav=__shfl_down(gt,31);//x000000 for next time g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; ph=pn; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); pr=threadIdx.x>=30?tmp:pr; pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx gsav=__shfl_down(gt,31);//x000000 for next time g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; } //itr n-1 pt=x+y*cols; ph=pn; pr=__shfl_down(ph,2); pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; #endif } GENERATE_CUDA_FUNC2DROWS(computeG2, (float* pp, float* g1p, float* gxp, float* gyp, int cols), (pp, g1p, gxp, gyp, cols)) { #if __CUDA_ARCH__>300 int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int dnoff=(y<gridDim.y*blockDim.y-1)*cols; //itr0 int pt=x+y*cols; float g1h,g1n,g1u,g1d,g1r,g1l,gx,gy; float tmp; //part2, find gx,gy x = threadIdx.x; y = blockIdx.y * blockDim.y + threadIdx.y; //itr0 pt=x+y*cols; g1h=g1p[pt]; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); if(threadIdx.x>=31){ g1r=tmp; } g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; g1h=g1n; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); g1r=threadIdx.x>=31?tmp:g1r; g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; } //itr n-1 pt=x+y*cols; g1h=g1n; g1r=__shfl_down(g1h,1); g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; #endif } //This version is faster, but makes synchronization 
errors at the lines between parts 1 and 2. //Could be fixed by a second pass for part 2 over the stitch lines, but I don't have time to figure that out //right now. GENERATE_CUDA_FUNC2DROWS(computeGunsafe, (float* pp, float* g1p, float* gxp, float* gyp, int cols), (pp, g1p, gxp, gyp, cols)) { #if __CUDA_ARCH__>300 //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch //TODO: rerun kernel on lines with y%32==31 or y%32==0 to fix stitch lines //Original pseudocode for this function: // //subscripts u,d,l,r mean up,down,left,right // void computeG(){ // // g0 is the strongest nearby gradient (excluding point defects) // g0x=fabsf(pr-pl);//|dx| // g0y=fabsf(pd-pu);//|dy| // g0=max(g0x,g0y); // // g1 is the scaled g0 through the g function exp(-alpha*x^beta) // g1=sqrt(g0); //beta=0.5 // alpha=3.5; // g1=exp(-alpha*g1); // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change // gx=max(g1r,g1); // gy=max(g1d,g1); // gu=gyu; //upper spring is the lower spring of the pixel above // gd=gy; //lower spring // gr=gx; //right spring // gl=gxl; //left spring is the right spring of the pixel to the left // } const float alpha=3.5f; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int upoff=-(y!=0)*cols; int dnoff=(y<gridDim.y*blockDim.y-1)*cols; //itr0 int pt=x+y*cols; float ph,pn,pu,pd,pl,pr; float g0x,g0y,g0,g1,g1h,g1n,g1u,g1d,g1r,g1l,gx,gy,gt,gsav; float tmp; ph=pp[pt]; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); if(threadIdx.x>=30){ pr=tmp; } pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx no prior val gsav=__shfl_down(gt,31);//x000000 for next time g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; ph=pn; pn=pp[pt+blockDim.x]; pr=__shfl_down(ph,2); tmp=__shfl_up(pn,30); pr=threadIdx.x>=30?tmp:pr; pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx gsav=__shfl_down(gt,31);//x000000 for next time g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; } //itr n-1 pt=x+y*cols; ph=pn; pr=__shfl_down(ph,2); pl=ph; pu=pp[pt+upoff]; pd=pp[pt+dnoff]; // g0 is the strongest nearby gradient (excluding point defects) gt=fabsf(pr-pl); g0x=__shfl_up(gt,1);//?xxxxxx g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx g0y=fabsf(pd-pu); g0=fmaxf(g0x,g0y); // g1 is the scaled g0 through the g function g1=sqrt(g0); g1=exp(-alpha*g1); //save g1p[pt]=g1; //part2, find gx,gy x = threadIdx.x; y = blockIdx.y * blockDim.y + threadIdx.y; //itr0 pt=x+y*cols; g1h=g1p[pt]; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); if(threadIdx.x>=31){ g1r=tmp; } g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; x+=32; //itr 1:n-2 for(;x<cols-32;x+=32){ pt=x+y*cols; g1h=g1n; g1n=g1p[pt+blockDim.x]; g1r=__shfl_down(g1h,1); tmp=__shfl_up(g1n,31); g1r=threadIdx.x>=31?tmp:g1r; g1l=g1h; g1u=g1h; g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; } //itr n-1 pt=x+y*cols; g1h=g1n; g1r=__shfl_down(g1h,1); g1l=g1h; g1u=g1h; 
g1d=g1p[pt+dnoff]; gx=fmaxf(g1l,g1r); gy=fmaxf(g1u,g1d); //save gxp[pt]=gx; gyp[pt]=gy; #endif } __device__ inline float saturate(float x){ //return x; return x/fmaxf(1.0f,fabsf(x)); } // static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt, // float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon, // float theta);//DANGER, no interblock synchronization = weird instability static __global__ void updateQ (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta); static __global__ void updateD (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta); void updateQDCaller(float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta) { dim3 dimBlock(BLOCKX2D, BLOCKY2D); dim3 dimGrid(1, (arows + dimBlock.y - 1) / dimBlock.y); CV_Assert(dimGrid.y>0); cudaSafeCall( cudaGetLastError() ); updateQ<<<dimGrid, dimBlock,0,localStream>>>( gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta); cudaSafeCall( cudaGetLastError() ); updateD<<<dimGrid, dimBlock,0,localStream>>>( gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta); cudaSafeCall( cudaGetLastError() ); }; // static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt, // float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon, // float theta) { // //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch // // //Original pseudocode for this function: // //void updateQD(){ // // //shifts are shuffles! // // for (all x in blocks of warpsize;;){ // // //qx update // // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; // // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // // dr=dh<<1; // // tmp=dn>>31; // // if (rt) // // dr=tmp; // // qxh=gqx/gxh; // // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. 
// // gqxpt[pt]=gqx; // // // // //qy update // // s[bpt]=dn; // // if(!btm){ // // dd=s[bpt+bdnoff]; // // }else{ // // dd=dpt[pt+dnoff]; // // } // // qyh=gqy/gy; // // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); // // gqy=saturate(gyh*qyh); // // gqypt[pt]=gqy; // // // // //dx update // // gqr=gqx; // // gql=gqx>>1; // // if (lf) // // gql=gqsave; // // gqsave=gqx<<31;//save for next iter // // dacc = gqr - gql;//dx part // // // // //dy update and d store // // gqd=gqy; // // s[bpt]=gqy; // // if(!top) // // gqu=s[bpt+bupoff]; // // else // // gqu=gqxpt[pt + upoff]; // // dacc += gqd-gqu; //dy part // // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); // // dpt[pt]=d; // // } // //} // __shared__ float s[32*BLOCKY2D]; // int x = threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // bool rt=x==31; // bool lf=x==0; // bool top=y==0; // bool btm=y==rows-1; // bool btop=threadIdx.y==0; // bool bbtm=threadIdx.y==blockDim.y-1; // int pt, bpt,bdnoff ,dnoff, bupoff, upoff; // // // float tmp,gqsave; // gqsave=0; // bpt = threadIdx.x+threadIdx.y*blockDim.x; // bdnoff=blockDim.x; // dnoff=(!btm)*cols; // bupoff=-blockDim.x; // upoff=-(!top)*cols; // // pt=x+y*cols; // // float dh,dn; // dn=dpt[pt]; // // for(;x<cols;x+=32){ // float qx,gx,gqx,qy,gy,gqy; // pt=x+y*cols; // // // //qx update // { // float dr; // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // // //load // { // dh=dn; // if(x<cols-32){ // dn=dpt[pt+32]; // // } // gqx=gqxpt[pt]; // gx=gxpt[pt]; // // gx=1.0f; // // } // // dr=__shfl_down(dh,1); // tmp=__shfl_up(dn,31); // if (rt && x<cols-32) // dr=tmp; // qx = gqx/gx; // qx = (qx+sigma_q*gx*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // gqx = saturate(gx*qx);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. // //gqxpt[pt]=gqx; // } // dpt[pt] = dh; // //qy update // { // float dd; // //load // { // gqy=gqypt[pt]; // gy=gypt[pt]; // // gy=1.0f; // } // s[bpt]=dh; // __syncthreads(); // if(!bbtm){ // dd=s[bpt+bdnoff]; // }else{ // dd=dpt[pt+dnoff]; // } // qy = gqy/gy; // qy = (qy+sigma_q*gy*(dd-dh))/(1+sigma_q*epsilon); // gqy = saturate(gy*qy); // //gqypt[pt]=gqy; // } // float dacc; // //dx update // { // float gqr,gql; // gqr=gqx; // gql=__shfl_up(gqx,1); // if (lf) // gql=gqsave; // gqsave=__shfl_down(gqx,31);//save for next iter // dacc = gqr - gql;//dx part // } // float d=dh; // //dy update and d store // { // float a; // //load // { // a=apt[pt]; // } // float gqu,gqd; // // gqd=gqy; // s[bpt]=gqy; // __syncthreads(); // if(!btop) // gqu=s[bpt+bupoff]; // else // gqu=gqypt[pt + upoff]; // if(y==0) // gqu=0.0f; // dacc += gqd-gqu; //dy part // d = ( d + sigma_d*(dacc + a/theta) ) / (1 + sigma_d/theta); // //dpt[pt] = d; // } // __syncthreads(); // gqxpt[pt]=gqx; // gqypt[pt]=gqy; // dpt[pt] = d; // __syncthreads(); // } // } GENERATE_CUDA_FUNC2DROWS(updateQ, (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, float theta), ( gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta)) { //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch //Original pseudocode for this function: //void updateQD(){ // //shifts are shuffles! 
// for (all x in blocks of warpsize;;){ // //qx update // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // dr=dh<<1; // tmp=dn>>31; // if (rt) // dr=tmp; // qxh=gqx/gxh; // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. // gqxpt[pt]=gqx; // // //qy update // s[bpt]=dn; // if(!btm){ // dd=s[bpt+bdnoff]; // }else{ // dd=dpt[pt+dnoff]; // } // qyh=gqy/gy; // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); // gqy=saturate(gyh*qyh); // gqypt[pt]=gqy; // // //dx update // gqr=gqx; // gql=gqx>>1; // if (lf) // gql=gqsave; // gqsave=gqx<<31;//save for next iter // dacc = gqr - gql;//dx part // // //dy update and d store // gqd=gqy; // s[bpt]=gqy; // if(!top) // gqu=s[bpt+bupoff]; // else // gqu=gqxpt[pt + upoff]; // dacc += gqd-gqu; //dy part // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); // dpt[pt]=d; // } //} #if __CUDA_ARCH__>300 __shared__ float s[32*BLOCKY2D]; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; bool rt=x==31; bool bbtm=threadIdx.y==blockDim.y-1; int pt, bpt,bdnoff ,dnoff; float tmp; bpt = threadIdx.x+threadIdx.y*blockDim.x; bdnoff=blockDim.x; dnoff=(y<gridDim.y*blockDim.y-1)*cols; pt=x+y*cols; float dh,dn; dn=dpt[pt]; for(;x<cols;x+=32){ float qx,gx,gqx,qy,gy,gqy; pt=x+y*cols; //qx update { float dr; //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) //load { dh=dn; if(x<cols-32){ dn=dpt[pt+32]; } gqx=gqxpt[pt]; gx=gxpt[pt]+.01f; // gx=1.0f; } dr=__shfl_down(dh,1); tmp=__shfl_up(dn,31); if (rt && x<cols-32) dr=tmp; qx = gqx/gx; //qx+=(gx*(dr-dh)-epsilon*qx)*.5f;//simplified step qx = (qx+sigma_q*gx*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) gqx = saturate(gx*qx);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. gqxpt[pt]=gqx; } //qy update { float dd; //load { gqy=gqypt[pt]; gy=gypt[pt]+.01f; // gy=1.0f; } s[bpt]=dh; __syncthreads(); if(!bbtm) dd=s[bpt+bdnoff]; else dd=dpt[pt+dnoff]; __syncthreads(); qy = gqy/gy; //qy+=(gy*(dd-dh)-epsilon*qy)*.5f;//simplified step qy = (qy+sigma_q*gy*(dd-dh))/(1+sigma_q*epsilon); gqy = saturate(gy*qy); gqypt[pt]=gqy; } //__syncthreads(); } #endif } GENERATE_CUDA_FUNC2DROWS(updateD, (float* gqxpt, float* gqypt, float *dpt, float * apt, float *gxpt, float *gypt,int cols, float sigma_q, float sigma_d, float epsilon, float theta), ( gqxpt, gqypt, dpt, apt, gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta)) { #if __CUDA_ARCH__>300 //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch //Original pseudocode for this function: //void updateQD(){ // //shifts are shuffles! // for (all x in blocks of warpsize){ // //qx update // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) // dr=dh<<1; // tmp=dn>>31; // if (rt) // dr=tmp; // qxh=gqx/gxh; // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. 
// gqxpt[pt]=gqx; // // //qy update // s[bpt]=dn; // if(!btm){ // dd=s[bpt+bdnoff]; // }else{ // dd=dpt[pt+dnoff]; // } // qyh=gqy/gy; // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); // gqy=saturate(gyh*qyh); // gqypt[pt]=gqy; // // //dx update // gqr=gqx; // gql=gqx>>1; // if (lf) // gql=gqsave; // gqsave=gqx<<31;//save for next iter // dacc = gqr - gql;//dx part // // //dy update and d store // gqd=gqy; // s[bpt]=gqy; // if(!top) // gqu=s[bpt+bupoff]; // else // gqu=gqxpt[pt + upoff]; // dacc += gqd-gqu; //dy part // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); // dpt[pt]=d; // } //} __shared__ float s[32*BLOCKY2D]; int x = threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; bool lf=x==0; bool top=y==0; bool btop=threadIdx.y==0; int pt, bpt, bupoff, upoff; float gqsave=0; bpt = threadIdx.x+threadIdx.y*blockDim.x; bupoff=-blockDim.x; upoff=-(!top)*cols; pt=x+y*cols; for(;x<cols;x+=32){ float gqx,gqy; pt=x+y*cols; float dacc; //dx update { float gqr,gql; gqr=gqx=gqxpt[pt]; gql=__shfl_up(gqx,1); if (lf) gql=gqsave; gqsave=__shfl_down(gqx,31);//save for next iter dacc = gqr - gql;//dx part } //dy update and d store { float a; //load { a=apt[pt]; } float gqu,gqd; float d=dpt[pt]; gqd=gqy=gqypt[pt]; s[bpt]=gqy; __syncthreads(); if(!btop) gqu=s[bpt+bupoff]; else gqu=gqypt[pt + upoff]; if(y==0) gqu=0.0f; dacc += gqd-gqu; //dy part //d += dacc*.5f;//simplified step d = ( d + sigma_d*(dacc + a/theta) ) / (1 + sigma_d/theta); dpt[pt] = d; } __syncthreads();//can't figure out why this is needed, but it is to avoid subtle errors in Qy at the ends of the warp } #endif } }}}}
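For reference, the arithmetic carried out by computeG, updateQ, and updateD above can be summarized as one iteration of the weighted-Huber primal-dual scheme that the pseudocode comments describe in spring-force terms. This summary is only a restatement of what the kernels compute, added here for clarity; it is not extra material from the original OpenDTAM source. With per-pixel edge weights

$$g = \exp\!\left(-\alpha\,\lvert\nabla I\rvert^{\beta}\right), \qquad \alpha = 3.5,\ \beta = 0.5,$$

where computeG approximates $\lvert\nabla I\rvert$ by $\max(\lvert\partial_x I\rvert, \lvert\partial_y I\rvert)$, the two update kernels perform

$$\tilde q = \frac{q^{n} + \sigma_q\, g\,\nabla d^{n}}{1 + \sigma_q\,\epsilon}, \qquad (gq)^{n+1} = \Pi\!\left(g\,\tilde q\right), \qquad d^{n+1} = \frac{d^{n} + \sigma_d\!\left(\operatorname{div}\,(gq)^{n+1} + a^{n}/\theta\right)}{1 + \sigma_d/\theta},$$

where $\Pi(p) = p/\max(1,\lvert p\rvert)$ is the componentwise clamp implemented by saturate(), $d$ is the depth estimate (dpt), $a$ is the auxiliary variable (apt), and the stored duals gqxpt/gqypt hold $g\,q$ rather than $q$ itself (the kernels also add 0.01f to the weights before dividing, to avoid division by zero).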
ae6da2ad3873315aa0dab624c74632940ab349da.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cassert> #include <cstdio> #include <cstdlib> #include <vector> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" int main(int argc, char *argv[]) { hipsolverDnHandle_t cusolverH = NULL; hipStream_t stream = NULL; const int m = 3; const int lda = m; /* * | 3.5 0.5 0 | * A = | 0.5 3.5 0 | * | 0 0 2 | * * | 10 2 3 | * B = | 2 10 5 | * | 3 5 10 | */ const std::vector<double> A = {3.5, 0.5, 0, 0.5, 3.5, 0.0, 0.0, 0.0, 2.0}; const std::vector<double> B = {10.0, 2.0, 3.0, 2.0, 10.0, 5.0, 3.0, 5.0, 10.0}; const std::vector<double> lambda = {0.158660256604, 0.370751508101882, 0.6}; std::vector<double> V(lda * m, 0); // eigenvectors std::vector<double> W(m, 0); // eigenvalues double *d_A = nullptr; double *d_B = nullptr; double *d_W = nullptr; int *d_info = nullptr; double *d_work = nullptr; int lwork = 0; int info = 0; double vl = 0.0; double vu = 0.0; int il = 1; int iu = 2; int h_meig = 0; std::printf("A = (matlab base-1)\n"); print_matrix(m, m, A.data(), lda); std::printf("=====\n"); std::printf("B = (matlab base-1)\n"); print_matrix(m, m, B.data(), lda); std::printf("=====\n"); /* step 1: create cusolver handle, bind a stream */ CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUSOLVER_CHECK(hipsolverDnSetStream(cusolverH, stream)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(double) * A.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_B), sizeof(double) * B.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_W), sizeof(double) * W.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_info), sizeof(int))); CUDA_CHECK( hipMemcpyAsync(d_A, A.data(), sizeof(double) * A.size(), hipMemcpyHostToDevice, stream)); CUDA_CHECK( hipMemcpyAsync(d_B, B.data(), sizeof(double) * B.size(), hipMemcpyHostToDevice, stream)); // step 3: query working space of sygvd hipsolverEigType_t itype = HIPSOLVER_EIG_TYPE_1; // A*x = (lambda)*B*x hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors. 
cusolverEigRange_t range = CUSOLVER_EIG_RANGE_I; // eigenvalues/eigenvectors in the half-open // interval (vl,vu] will be found hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; /* cusolverStatus_t cusolverDnDsygvd_bufferSize( hipsolverDnHandle_t handle, hipsolverEigType_t itype, hipsolverEigMode_t jobz, hipblasFillMode_t uplo, int n, const double *A, int lda, const double *B, int ldb, const double *W, int *lwork); */ /* cusolverStatus_t cusolverDnDsygvdx_bufferSize( hipsolverDnHandle_t handle, hipsolverEigType_t itype, hipsolverEigMode_t jobz, cusolverEigRange_t range, <- new hipblasFillMode_t uplo, int n, const double *A, int lda, const double *B, int ldb, double vl, <- new double vu, <- new int il, <- new int iu, <- new int *h_meig, <- new const double *W, int *lwork); */ CUSOLVER_CHECK(cusolverDnDsygvdx_bufferSize(cusolverH, itype, jobz, range, uplo, m, d_A, lda, d_B, lda, vl, vu, il, iu, &h_meig, d_W, &lwork)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_work), sizeof(double) * lwork)); // step 4: compute spectrum of (A,B) CUSOLVER_CHECK(cusolverDnDsygvdx(cusolverH, itype, jobz, range, uplo, m, d_A, lda, d_B, lda, vl, vu, il, iu, &h_meig, d_W, d_work, lwork, d_info)); CUDA_CHECK( hipMemcpyAsync(V.data(), d_A, sizeof(double) * A.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK( hipMemcpyAsync(W.data(), d_W, sizeof(double) * W.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&info, d_info, sizeof(int), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); std::printf("after sygvd: info = %d\n", info); if (0 > info) { std::printf("%d-th parameter is wrong \n", -info); exit(1); } std::printf("eigenvalue = (matlab base-1), ascending order\n"); int idx = 1; for (auto const &i : W) { std::printf("W[%i] = %E\n", idx, i); idx++; } std::printf("V = (matlab base-1)\n"); print_matrix(m, m, V.data(), lda); std::printf("=====\n"); std::printf("Eigenvalues found = %d\n", h_meig); // step 4: check eigenvalues double lambda_sup = 0; for (int i = 0; i < m; i++) { double error = fabs(lambda[i] - W[i]); lambda_sup = (lambda_sup > error) ? lambda_sup : error; } std::printf("|lambda - W| = %E\n", lambda_sup); /* free resources */ CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_B)); CUDA_CHECK(hipFree(d_W)); CUDA_CHECK(hipFree(d_info)); CUDA_CHECK(hipFree(d_work)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
ae6da2ad3873315aa0dab624c74632940ab349da.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cassert> #include <cstdio> #include <cstdlib> #include <vector> #include <cuda_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" int main(int argc, char *argv[]) { cusolverDnHandle_t cusolverH = NULL; cudaStream_t stream = NULL; const int m = 3; const int lda = m; /* * | 3.5 0.5 0 | * A = | 0.5 3.5 0 | * | 0 0 2 | * * | 10 2 3 | * B = | 2 10 5 | * | 3 5 10 | */ const std::vector<double> A = {3.5, 0.5, 0, 0.5, 3.5, 0.0, 0.0, 0.0, 2.0}; const std::vector<double> B = {10.0, 2.0, 3.0, 2.0, 10.0, 5.0, 3.0, 5.0, 10.0}; const std::vector<double> lambda = {0.158660256604, 0.370751508101882, 0.6}; std::vector<double> V(lda * m, 0); // eigenvectors std::vector<double> W(m, 0); // eigenvalues double *d_A = nullptr; double *d_B = nullptr; double *d_W = nullptr; int *d_info = nullptr; double *d_work = nullptr; int lwork = 0; int info = 0; double vl = 0.0; double vu = 0.0; int il = 1; int iu = 2; int h_meig = 0; std::printf("A = (matlab base-1)\n"); print_matrix(m, m, A.data(), lda); std::printf("=====\n"); std::printf("B = (matlab base-1)\n"); print_matrix(m, m, B.data(), lda); std::printf("=====\n"); /* step 1: create cusolver handle, bind a stream */ CUSOLVER_CHECK(cusolverDnCreate(&cusolverH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUSOLVER_CHECK(cusolverDnSetStream(cusolverH, stream)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(double) * A.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_B), sizeof(double) * B.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_W), sizeof(double) * W.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_info), sizeof(int))); CUDA_CHECK( cudaMemcpyAsync(d_A, A.data(), sizeof(double) * A.size(), cudaMemcpyHostToDevice, stream)); CUDA_CHECK( cudaMemcpyAsync(d_B, B.data(), sizeof(double) * B.size(), cudaMemcpyHostToDevice, stream)); // step 3: query working space of sygvd cusolverEigType_t itype = CUSOLVER_EIG_TYPE_1; // A*x = (lambda)*B*x cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors. 
cusolverEigRange_t range = CUSOLVER_EIG_RANGE_I; // eigenvalues/eigenvectors in the half-open // interval (vl,vu] will be found cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; /* cusolverStatus_t cusolverDnDsygvd_bufferSize( cusolverDnHandle_t handle, cusolverEigType_t itype, cusolverEigMode_t jobz, cublasFillMode_t uplo, int n, const double *A, int lda, const double *B, int ldb, const double *W, int *lwork); */ /* cusolverStatus_t cusolverDnDsygvdx_bufferSize( cusolverDnHandle_t handle, cusolverEigType_t itype, cusolverEigMode_t jobz, cusolverEigRange_t range, <- new cublasFillMode_t uplo, int n, const double *A, int lda, const double *B, int ldb, double vl, <- new double vu, <- new int il, <- new int iu, <- new int *h_meig, <- new const double *W, int *lwork); */ CUSOLVER_CHECK(cusolverDnDsygvdx_bufferSize(cusolverH, itype, jobz, range, uplo, m, d_A, lda, d_B, lda, vl, vu, il, iu, &h_meig, d_W, &lwork)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_work), sizeof(double) * lwork)); // step 4: compute spectrum of (A,B) CUSOLVER_CHECK(cusolverDnDsygvdx(cusolverH, itype, jobz, range, uplo, m, d_A, lda, d_B, lda, vl, vu, il, iu, &h_meig, d_W, d_work, lwork, d_info)); CUDA_CHECK( cudaMemcpyAsync(V.data(), d_A, sizeof(double) * A.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK( cudaMemcpyAsync(W.data(), d_W, sizeof(double) * W.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); std::printf("after sygvd: info = %d\n", info); if (0 > info) { std::printf("%d-th parameter is wrong \n", -info); exit(1); } std::printf("eigenvalue = (matlab base-1), ascending order\n"); int idx = 1; for (auto const &i : W) { std::printf("W[%i] = %E\n", idx, i); idx++; } std::printf("V = (matlab base-1)\n"); print_matrix(m, m, V.data(), lda); std::printf("=====\n"); std::printf("Eigenvalues found = %d\n", h_meig); // step 4: check eigenvalues double lambda_sup = 0; for (int i = 0; i < m; i++) { double error = fabs(lambda[i] - W[i]); lambda_sup = (lambda_sup > error) ? lambda_sup : error; } std::printf("|lambda - W| = %E\n", lambda_sup); /* free resources */ CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_B)); CUDA_CHECK(cudaFree(d_W)); CUDA_CHECK(cudaFree(d_info)); CUDA_CHECK(cudaFree(d_work)); CUSOLVER_CHECK(cusolverDnDestroy(cusolverH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
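The sygvdx sample above compares the returned eigenvalues against hard-coded references, and because range = CUSOLVER_EIG_RANGE_I with il = 1, iu = 2, only the first h_meig entries of W are meaningful. A small host-side residual check of the returned eigenpairs is sketched below; it is an illustrative addition rather than part of the NVIDIA sample, and it assumes, per cuSOLVER's dense (column-major) conventions, that the eigenvectors come back as the first h_meig columns of the array copied into V and satisfy A v_k = w_k B v_k.

// Hypothetical helper (not in the original sample): worst-case residual
// |A*v_k - w_k*B*v_k| over the h_meig eigenpairs returned by cusolverDnDsygvdx.
// A and B are the full symmetric host matrices, V holds the eigenvectors
// column-major with leading dimension lda, W holds the eigenvalues.
#include <cmath>
#include <vector>

static double sygvdx_residual(const std::vector<double> &A, const std::vector<double> &B,
                              const std::vector<double> &V, const std::vector<double> &W,
                              int m, int lda, int h_meig) {
    double worst = 0.0;
    for (int k = 0; k < h_meig; k++) {        // eigenpair index (column k of V)
        for (int i = 0; i < m; i++) {         // row i of the residual vector
            double Av = 0.0, Bv = 0.0;
            for (int j = 0; j < m; j++) {     // column-major access: M(i,j) = M[i + j*lda]
                Av += A[i + j * lda] * V[j + k * lda];
                Bv += B[i + j * lda] * V[j + k * lda];
            }
            worst = std::fmax(worst, std::fabs(Av - W[k] * Bv));
        }
    }
    return worst;   // expected to be near machine precision for this 3x3 problem
}

It would be called after the device-to-host copies and the stream synchronization, e.g. std::printf("max |A*v - w*B*v| = %E\n", sygvdx_residual(A, B, V, W, m, lda, h_meig));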
246fbe930a0860f91b38cea228791dcc1a58f0d6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "compare.h" #include "gputimer.h" // Reference __global__ void smooth(float * v_new, const float * v) { int myIdx = threadIdx.x * gridDim.x + blockIdx.x; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; float myElt = v[myIdx]; float myLeftElt = v[myLeftIdx]; float myRightElt = v[myRightIdx]; v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt; } // Your code __global__ void smooth_shared(float * v_new, const float * v) { extern __shared__ float s[]; int myIdx = ( blockIdx.x * blockDim.x ) + threadIdx.x; s[threadIdx.x] = v[myIdx]; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; if (threadIdx.x == 0) { s[blockDim.x+1] = v[myLeftIdx]; } if (threadIdx.x == blockDim.x-1) { s[blockDim.x] = v[myRightIdx]; } __syncthreads(); v_new[myIdx] = 0.25f * s[threadIdx.x == 0 ? (blockDim.x + 1) : (threadIdx.x - 1)] + 0.5f * s[threadIdx.x] + 0.25f * s[threadIdx.x+1]; } int main(int argc, char **argv) { const int ARRAY_SIZE = 4096; const int BLOCK_SIZE = 256; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float h_cmp[ARRAY_SIZE]; float h_out[ARRAY_SIZE]; float h_out_shared[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [0, 1] h_in[i] = (float)random()/(float)RAND_MAX; } for(int i = 0; i < ARRAY_SIZE; i++) { h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i-1] + 0.50f * h_in[i] + 0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i+1]); } // declare GPU memory pointers float * d_in, * d_out, * d_out_shared; // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_out, ARRAY_BYTES); hipMalloc((void **) &d_out_shared, ARRAY_BYTES); // transfer the input array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // launch the kernel GpuTimer timer; timer.Start(); hipLaunchKernelGGL(( smooth), dim3(ARRAY_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_out, d_in); timer.Stop(); printf("Their code executed in %g ms\n", timer.Elapsed()); timer.Start(); hipLaunchKernelGGL(( smooth_shared), dim3(ARRAY_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), (BLOCK_SIZE + 2) * sizeof(float), 0, d_out_shared, d_in); timer.Stop(); printf("Your code executed in %g ms\n", timer.Elapsed()); // hipEventSynchronize(stop); // float elapsedTime; // hipEventElapsedTime(&elapsedTime, start, stop); // copy back the result from GPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); hipMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, hipMemcpyDeviceToHost); // testing for correctness compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE); // free GPU memory allocation hipFree(d_in); hipFree(d_out); hipFree(d_out_shared); return 0; }
246fbe930a0860f91b38cea228791dcc1a58f0d6.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "compare.h" #include "gputimer.h" // Reference __global__ void smooth(float * v_new, const float * v) { int myIdx = threadIdx.x * gridDim.x + blockIdx.x; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; float myElt = v[myIdx]; float myLeftElt = v[myLeftIdx]; float myRightElt = v[myRightIdx]; v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt; } // Your code __global__ void smooth_shared(float * v_new, const float * v) { extern __shared__ float s[]; int myIdx = ( blockIdx.x * blockDim.x ) + threadIdx.x; s[threadIdx.x] = v[myIdx]; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; if (threadIdx.x == 0) { s[blockDim.x+1] = v[myLeftIdx]; } if (threadIdx.x == blockDim.x-1) { s[blockDim.x] = v[myRightIdx]; } __syncthreads(); v_new[myIdx] = 0.25f * s[threadIdx.x == 0 ? (blockDim.x + 1) : (threadIdx.x - 1)] + 0.5f * s[threadIdx.x] + 0.25f * s[threadIdx.x+1]; } int main(int argc, char **argv) { const int ARRAY_SIZE = 4096; const int BLOCK_SIZE = 256; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float h_cmp[ARRAY_SIZE]; float h_out[ARRAY_SIZE]; float h_out_shared[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [0, 1] h_in[i] = (float)random()/(float)RAND_MAX; } for(int i = 0; i < ARRAY_SIZE; i++) { h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i-1] + 0.50f * h_in[i] + 0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i+1]); } // declare GPU memory pointers float * d_in, * d_out, * d_out_shared; // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); cudaMalloc((void **) &d_out_shared, ARRAY_BYTES); // transfer the input array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // launch the kernel GpuTimer timer; timer.Start(); smooth<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_out, d_in); timer.Stop(); printf("Their code executed in %g ms\n", timer.Elapsed()); timer.Start(); smooth_shared<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float)>>>(d_out_shared, d_in); timer.Stop(); printf("Your code executed in %g ms\n", timer.Elapsed()); // cudaEventSynchronize(stop); // float elapsedTime; // cudaEventElapsedTime(&elapsedTime, start, stop); // copy back the result from GPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, cudaMemcpyDeviceToHost); // testing for correctness compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE); // free GPU memory allocation cudaFree(d_in); cudaFree(d_out); cudaFree(d_out_shared); return 0; }
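The smooth_shared kernel above keeps the interior tile at s[0..blockDim.x-1] and parks the two halo values at s[blockDim.x] and s[blockDim.x+1], which is why the left-neighbour read needs a conditional index. A sketch of the more conventional layout, with the tile shifted by one so the halos sit at s[0] and s[blockDim.x+1], is shown below for comparison; it is an alternative illustration, not part of the exercise code, and it assumes the same launch configuration (ARRAY_SIZE/BLOCK_SIZE blocks, BLOCK_SIZE threads, (BLOCK_SIZE + 2) * sizeof(float) bytes of dynamic shared memory).

// Sketch: same 1D smoothing, shared-memory tile with halo cells at both ends.
__global__ void smooth_shared_halo(float *v_new, const float *v)
{
    extern __shared__ float s[];                 // s[0] and s[blockDim.x + 1] are the halos
    int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int n    = blockDim.x * gridDim.x;
    int tIdx = threadIdx.x + 1;                  // interior indices shifted by one

    s[tIdx] = v[gIdx];
    if (threadIdx.x == 0)                        // left halo, clamped at the array edge
        s[0] = v[(gIdx == 0) ? 0 : gIdx - 1];
    if (threadIdx.x == blockDim.x - 1)           // right halo, clamped at the array edge
        s[blockDim.x + 1] = v[(gIdx == n - 1) ? n - 1 : gIdx + 1];
    __syncthreads();

    v_new[gIdx] = 0.25f * s[tIdx - 1] + 0.5f * s[tIdx] + 0.25f * s[tIdx + 1];
}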
216bde7f1b5c0e44294956144965997fe7e02b55.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ /* * Modified to iterate the vector addition multiple times to use as * benchmark/stress test for GPU locking using Cuda call-wrapping * functions. The program's performance is dominated by memory copies * between Host and Device using the copy engine (CE), while computation * on the execution engine (EE) is signigicantly less time consuming than * the copying. * * This version uses a user allocated stream and asynchronous memory * copy operations (hipMemcpyAsync()). Cuda kernel invocations on the * stream are also asynchronous. hipStreamSynchronize() is used to * synchronize with both the copy and kernel executions. Host pinned * memory was also added to better work with the extensive copy operations. * * Modified by Don Smith, Department of Computer Science, * University of North Carolina at Chapel Hill * 2015 */ // control number of iterations by count or elapsed time #define MAX_LOOPS 10000 // iteration count #define TIME_LENGTH 30 // elapsed time (seconds) #include <stdio.h> #include <sys/types.h> #include <unistd.h> #include <sched.h> #include <errno.h> // For the CUDA runtime routines (prefixed with "cuda") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(int argc, char *argv[]) { int i; int count = 0; pid_t my_pid; time_t start_time, now, elapsed; int sync_level = 2; //default -- process blocking my_pid = getpid(); /* * The only parameter is an integer that indicates the desired level of * synchronization used by the GPU driver (values defined below). The * specified level is used in hipSetDeviceFlags() to set the level * prior to initialization. */ if (argc == 2) sync_level = atoi(argv[1]); // level 0 - spin polling (busy waiting) for GPU to finish // level 1 - yield each time through the polling loop to let another thread run // level 2 - block process waiting for GPU to finish switch (sync_level) { case 0: hipSetDeviceFlags(hipDeviceScheduleSpin); fprintf(stderr, "PID %d started > Synch Level is Spin\n", my_pid); break; case 1: hipSetDeviceFlags(hipDeviceScheduleYield); fprintf(stderr, "PID %d started > Synch Level is Yield\n", my_pid); break; default: hipSetDeviceFlags(hipDeviceScheduleBlockingSync); fprintf(stderr, "PID %d started > Synch Level is Block\n", my_pid); } #ifdef SET_PRIORITY /* * WARNING: this code has not been tested. 
*/ // set initial priority to specified value (must be odd > 2) // assume the initial priority is set before CUDA initialied int rc; struct sched_param my_param; int my_prio = 0; if (argc == 2) { my_prio = atoi(argv[1]); if ((my_prio < 3) || ((my_prio % 2) == 0)) my_prio = 0; } if (my_prio == 0) { fprintf(stderr, "PID %d running SCHED_OTHER\n", my_pid); my_param.sched_priority = 0; rc = sched_setscheduler(0, SCHED_OTHER, &my_param); } else { fprintf(stderr, "PID %d running SCHED_FIFO priority %d\n", my_pid, my_prio); my_param.sched_priority = my_prio; rc = sched_setscheduler(0, SCHED_FIFO, &my_param); } if (rc != 0) { fprintf(stderr, "PID %d Set Scheduler FAILED, running default, error %d\n", my_pid, errno); } #endif // Follow convention and initialize CUDA/GPU // used here to invoke initialization of GPU locking hipFree(0); // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // create a user defined stream hipStream_t my_stream; hipStreamCreate(&my_stream); int numElements = 4000000; size_t size = numElements * sizeof(float); // 16,000,000 bytes float *h_A, *h_B, *h_C; // Host allocations in pinned memory // Allocate the host input vector A err = hipHostMalloc((void **)&h_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate host vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the host input vector B err = hipHostMalloc((void **)&h_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate host vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the host output vector C err = hipHostMalloc((void **)&h_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate host vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } #ifdef RESET_PRIORITY /* * WARNING: this code has not been tested. 
*/ // reset main priority to make callback have greater priority if (my_prio > 0) { my_param.sched_priority = my_prio - 1; sched_setscheduler(0, SCHED_FIFO, &my_param); } #endif fprintf(stderr, "PID %d Iterating Vector Add CUDA Kernel for %d seconds, %d max loops\n", my_pid, TIME_LENGTH, MAX_LOOPS); now = start_time = time(NULL); for (i = 0; ((now - TIME_LENGTH) < start_time) && i < MAX_LOOPS; i++) { // copy the A and B vectors from Host to Device memory // these calls are asynchronous so only the lock of CE can be handled in the wrapper err = hipMemcpyAsync(d_A, h_A, size, hipMemcpyHostToDevice, my_stream); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream // the wrapper for this function releases any lock held (CE here) hipStreamSynchronize(my_stream); err = hipMemcpyAsync(d_B, h_B, size, hipMemcpyHostToDevice, my_stream); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream // the wrapper for this function releases any lock held (CE here) hipStreamSynchronize(my_stream); // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; // lock of EE is handled in wrapper for hipLaunch() hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, my_stream, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream after kernel execution // the wrapper for this function releases any lock held (EE here) hipStreamSynchronize(my_stream); // copy the result vector from Device to Host memory // this call is asynchronous so only the lock of CE can be handled in the wrapper err = hipMemcpyAsync(h_C, d_C, size, hipMemcpyDeviceToHost, my_stream); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream // the wrapper for this function releases any lock held (CE here) hipStreamSynchronize(my_stream); now = time(NULL); } // ends for loop elapsed = now - start_time; count = i; // Verify that the result vector is correct // This verification is applied only to the // last result computed for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } fprintf(stderr, "PID %d Test PASSED\n", my_pid); fprintf(stderr, "PID %d completed %d, duration %ld seconds\n", my_pid, count, elapsed); fprintf(stdout, "%d,", count); // Free device global memory for inputs A and B and result C err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory that was pinned hipHostFree(h_A); hipHostFree(h_B); hipHostFree(h_C); // clean 
up the user allocated stream hipStreamSynchronize(my_stream); hipStreamDestroy(my_stream); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
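// A minimal sketch (not part of the benchmark above) of how one iteration of the
// copy/kernel/copy sequence could be timed with HIP events on the same user stream,
// instead of the whole-run time(NULL) bookkeeping; ev_start and ev_stop are
// illustrative names.
hipEvent_t ev_start, ev_stop;
hipEventCreate(&ev_start);
hipEventCreate(&ev_stop);

hipEventRecord(ev_start, my_stream);
// ... hipMemcpyAsync / kernel launch / hipMemcpyAsync on my_stream, as in the loop above ...
hipEventRecord(ev_stop, my_stream);
hipEventSynchronize(ev_stop);

float iter_ms = 0.0f;
hipEventElapsedTime(&iter_ms, ev_start, ev_stop);
fprintf(stderr, "iteration took %f ms\n", iter_ms);

hipEventDestroy(ev_start);
hipEventDestroy(ev_stop);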
216bde7f1b5c0e44294956144965997fe7e02b55.cu
/** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ /* * Modified to iterate the vector addition multiple times to use as * benchmark/stress test for GPU locking using Cuda call-wrapping * functions. The program's performance is dominated by memory copies * between Host and Device using the copy engine (CE), while computation * on the execution engine (EE) is signigicantly less time consuming than * the copying. * * This version uses a user allocated stream and asynchronous memory * copy operations (cudaMemcpyAsync()). Cuda kernel invocations on the * stream are also asynchronous. cudaStreamSynchronize() is used to * synchronize with both the copy and kernel executions. Host pinned * memory was also added to better work with the extensive copy operations. * * Modified by Don Smith, Department of Computer Science, * University of North Carolina at Chapel Hill * 2015 */ // control number of iterations by count or elapsed time #define MAX_LOOPS 10000 // iteration count #define TIME_LENGTH 30 // elapsed time (seconds) #include <stdio.h> #include <sys/types.h> #include <unistd.h> #include <sched.h> #include <errno.h> // For the CUDA runtime routines (prefixed with "cuda") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(int argc, char *argv[]) { int i; int count = 0; pid_t my_pid; time_t start_time, now, elapsed; int sync_level = 2; //default -- process blocking my_pid = getpid(); /* * The only parameter is an integer that indicates the desired level of * synchronization used by the GPU driver (values defined below). The * specified level is used in cudaSetDeviceFlags() to set the level * prior to initialization. */ if (argc == 2) sync_level = atoi(argv[1]); // level 0 - spin polling (busy waiting) for GPU to finish // level 1 - yield each time through the polling loop to let another thread run // level 2 - block process waiting for GPU to finish switch (sync_level) { case 0: cudaSetDeviceFlags(cudaDeviceScheduleSpin); fprintf(stderr, "PID %d started > Synch Level is Spin\n", my_pid); break; case 1: cudaSetDeviceFlags(cudaDeviceScheduleYield); fprintf(stderr, "PID %d started > Synch Level is Yield\n", my_pid); break; default: cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); fprintf(stderr, "PID %d started > Synch Level is Block\n", my_pid); } #ifdef SET_PRIORITY /* * WARNING: this code has not been tested. 
*/ // set initial priority to specified value (must be odd > 2) // assume the initial priority is set before CUDA initialied int rc; struct sched_param my_param; int my_prio = 0; if (argc == 2) { my_prio = atoi(argv[1]); if ((my_prio < 3) || ((my_prio % 2) == 0)) my_prio = 0; } if (my_prio == 0) { fprintf(stderr, "PID %d running SCHED_OTHER\n", my_pid); my_param.sched_priority = 0; rc = sched_setscheduler(0, SCHED_OTHER, &my_param); } else { fprintf(stderr, "PID %d running SCHED_FIFO priority %d\n", my_pid, my_prio); my_param.sched_priority = my_prio; rc = sched_setscheduler(0, SCHED_FIFO, &my_param); } if (rc != 0) { fprintf(stderr, "PID %d Set Scheduler FAILED, running default, error %d\n", my_pid, errno); } #endif // Follow convention and initialize CUDA/GPU // used here to invoke initialization of GPU locking cudaFree(0); // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // create a user defined stream cudaStream_t my_stream; cudaStreamCreate(&my_stream); int numElements = 4000000; size_t size = numElements * sizeof(float); // 16,000,000 bytes float *h_A, *h_B, *h_C; // Host allocations in pinned memory // Allocate the host input vector A err = cudaMallocHost((void **)&h_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate host vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the host input vector B err = cudaMallocHost((void **)&h_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate host vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the host output vector C err = cudaMallocHost((void **)&h_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate host vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } #ifdef RESET_PRIORITY /* * WARNING: this code has not been tested. 
*/ // reset main priority to make callback have greater priority if (my_prio > 0) { my_param.sched_priority = my_prio - 1; sched_setscheduler(0, SCHED_FIFO, &my_param); } #endif fprintf(stderr, "PID %d Iterating Vector Add CUDA Kernel for %d seconds, %d max loops\n", my_pid, TIME_LENGTH, MAX_LOOPS); now = start_time = time(NULL); for (i = 0; ((now - TIME_LENGTH) < start_time) && i < MAX_LOOPS; i++) { // copy the A and B vectors from Host to Device memory // these calls are asynchronous so only the lock of CE can be handled in the wrapper err = cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, my_stream); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream // the wrapper for this function releases any lock held (CE here) cudaStreamSynchronize(my_stream); err = cudaMemcpyAsync(d_B, h_B, size, cudaMemcpyHostToDevice, my_stream); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream // the wrapper for this function releases any lock held (CE here) cudaStreamSynchronize(my_stream); // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; // lock of EE is handled in wrapper for cudaLaunch() vectorAdd<<<blocksPerGrid, threadsPerBlock, 0, my_stream>>>(d_A, d_B, d_C, numElements); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream after kernel execution // the wrapper for this function releases any lock held (EE here) cudaStreamSynchronize(my_stream); // copy the result vector from Device to Host memory // this call is asynchronous so only the lock of CE can be handled in the wrapper err = cudaMemcpyAsync(h_C, d_C, size, cudaMemcpyDeviceToHost, my_stream); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // synchronize with the stream // the wrapper for this function releases any lock held (CE here) cudaStreamSynchronize(my_stream); now = time(NULL); } // ends for loop elapsed = now - start_time; count = i; // Verify that the result vector is correct // This verification is applied only to the // last result computed for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } fprintf(stderr, "PID %d Test PASSED\n", my_pid); fprintf(stderr, "PID %d completed %d, duration %ld seconds\n", my_pid, count, elapsed); fprintf(stdout, "%d,", count); // Free device global memory for inputs A and B and result C err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory that was pinned cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_C); // clean 
up the user allocated stream cudaStreamSynchronize(my_stream); cudaStreamDestroy(my_stream); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
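// The file above repeats the same "if (err != cudaSuccess) { fprintf(...); exit(...); }"
// block after nearly every CUDA call. A minimal sketch of a checking macro that would
// collapse that boilerplate; CHECK_CUDA is an illustrative name and is not used by the
// original file.
#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t e_ = (call);                                            \
        if (e_ != cudaSuccess) {                                            \
            fprintf(stderr, "%s:%d: %s failed: %s\n",                       \
                    __FILE__, __LINE__, #call, cudaGetErrorString(e_));     \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Possible usage:
//   CHECK_CUDA(cudaMallocHost((void **)&h_A, size));
//   CHECK_CUDA(cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, my_stream));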
3f2f4f26ab691da1511d35ba0ae753c04f028f91.hip
// !!! This is a file automatically generated by hipify!!!
#include "../balancing.cuh"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

int main(int argc, char* args[]) {
    int n_worker = atoi(args[1]);
    int n_expert = atoi(args[2]);
    int batch_size = atoi(args[3]);
    int tot_expert = n_worker * n_expert;
    long* gate_idx = new long[batch_size];
    long* n_gate_idx = new long[batch_size];
    int* lec = new int[tot_expert];
    memset(lec, 0, sizeof(int) * tot_expert);
    for (int i = 0; i < batch_size; ++i) {
        gate_idx[i] = rand() % tot_expert;
        ++lec[gate_idx[i]];
    }
    for (int i = 0; i < tot_expert; ++i) {
        lec[i] >>= 1;
    }
    int* g_lec;
    hipMalloc(&g_lec, sizeof(int) * tot_expert);
    hipMemcpy(g_lec, lec, sizeof(int) * tot_expert, hipMemcpyHostToDevice);
    long* g_gate_idx;
    hipMalloc(&g_gate_idx, sizeof(long) * batch_size);
    hipMemcpy(g_gate_idx, gate_idx, sizeof(long) * batch_size, hipMemcpyHostToDevice);
    auto smgr = getCudaStreamManager(0);
    fmoe_cuda_prune_gate_by_capacity_impl(g_gate_idx, g_lec,
            batch_size, n_expert, n_worker, smgr);
    hipMemcpy(n_gate_idx, g_gate_idx, sizeof(long) * batch_size, hipMemcpyDeviceToHost);
    for (int i = 0; i < batch_size; ++i) {
        printf("%ld %ld (%d)\n", gate_idx[i], n_gate_idx[i], lec[gate_idx[i]]);
    }
}
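// The test above returns without releasing its buffers. A minimal cleanup sketch
// that could be appended to the end of main, assuming the stream manager obtained
// from getCudaStreamManager() needs no explicit teardown here:
hipFree(g_lec);
hipFree(g_gate_idx);
delete [] gate_idx;
delete [] n_gate_idx;
delete [] lec;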
3f2f4f26ab691da1511d35ba0ae753c04f028f91.cu
#include "../balancing.cuh" #include <cstdio> #include <cstdlib> #include <cstring> #include <cuda.h> #include <cuda_runtime.h> int main(int argc, char* args[]) { int n_worker = atoi(args[1]); int n_expert = atoi(args[2]); int batch_size = atoi(args[3]); int tot_expert = n_worker * n_expert; long* gate_idx = new long[batch_size]; long* n_gate_idx = new long[batch_size]; int* lec = new int[tot_expert]; memset(lec, 0, sizeof(int) * tot_expert); for (int i = 0; i < batch_size; ++i) { gate_idx[i] = rand() % tot_expert; ++lec[gate_idx[i]]; } for (int i = 0; i < tot_expert; ++i) { lec[i] >>= 1; } int* g_lec; cudaMalloc(&g_lec, sizeof(int) * tot_expert); cudaMemcpy(g_lec, lec, sizeof(int) * tot_expert, cudaMemcpyHostToDevice); long* g_gate_idx; cudaMalloc(&g_gate_idx, sizeof(long) * batch_size); cudaMemcpy(g_gate_idx, gate_idx, sizeof(long) * batch_size, cudaMemcpyHostToDevice); auto smgr = getCudaStreamManager(0); fmoe_cuda_prune_gate_by_capacity_impl(g_gate_idx, g_lec, batch_size, n_expert, n_worker, smgr); cudaMemcpy(n_gate_idx, g_gate_idx, sizeof(long) * batch_size, cudaMemcpyDeviceToHost); for (int i = 0; i < batch_size; ++i) { printf("%ld %ld (%d)\n", gate_idx[i], n_gate_idx[i], lec[gate_idx[i]]); } }
efa92e9b9285554d2a3fc917c37377727983eeac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./gaussian_kernel.h" #define BLOCK 32 #define FILTER_WIDTH 9 // Shared Memory = BLOCK + FILTER_WIDTH - 1 #define SHARED_MEM_SIZE 40 // gaussianBlurGlobal: // Kernel that computes a gaussian blur over a single RGB channel. // This implementation in specific does not use shared memory to // improve performance. __global__ void gaussianBlurGlobal(unsigned char *d_in, unsigned char *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Initialize row and column operators int in_row, in_col; // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Given the filter width, determine the correct row and col offsets int blur_offset = ((filterWidth-1)/2); // Setup loop variables float blur_sum = 0; int filter_pos = 0; // Iterate from the furthest back row to the furthest forward row for (in_row = gl_row - blur_offset; in_row <= gl_row + blur_offset; in_row++){ // Iterate from the furthest back col to the furthest forward col for (in_col = gl_col - blur_offset; in_col <= gl_col + blur_offset; in_col++){ // Ensure target blur pixel location is valid if (in_row >= 0 && in_row < num_rows && in_col >= 0 && in_col < num_cols){ // Get target blur pixel offset int pixel_offset = in_row * num_cols + in_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)d_in[pixel_offset] * d_filter[filter_pos]; } // Always increment filter location filter_pos++; } } // Store results in the correct location of the output array int result_offset = gl_row * num_cols + gl_col; d_out[result_offset] = (unsigned char)blur_sum; } } // gaussianBlurSharedv1: // Kernel that computes a gaussian blur over a single RGB channel. // This implementation in specific uses shared memory to reduce the // number of accesses to global memory to improve performance. 
__global__ void gaussianBlurSharedv1(unsigned char *d_in, unsigned char *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Create shared memory input and array __shared__ unsigned char input_pixels[BLOCK*BLOCK]; // Get location in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Get location in shared memory int sh_row = threadIdx.y; int sh_col = threadIdx.x; // Ensure target working pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Load shared memory values for all interal pixels from global memory int global_offset = gl_row * num_cols + gl_col; int shared_offset = sh_row * blockDim.x + sh_col; input_pixels[shared_offset] = d_in[global_offset]; // Make sure all threads have loaded before starting computation __syncthreads(); // Setup loop variables int in_row, in_col; int in_gl_row, in_gl_col; float blur_sum = 0; int filter_pos = 0; // Given the filter width, determine the correct row and col offsets int blur_offset = ((filterWidth-1)/2); // Iterate from the furthest back row to the furthest forward row for (in_row = sh_row - blur_offset; in_row <= sh_row + blur_offset; in_row++){ // Iterate from the furthest back col to the furthest forward col for (in_col = sh_col - blur_offset; in_col <= sh_col + blur_offset; in_col++){ // Target Pixel is In Shared Memory if (in_row >= 0 && in_row < blockDim.y && in_col >= 0 && in_col < blockDim.x){ // Get target blur pixel offset int shared_offset = in_row * blockDim.x + in_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixels[shared_offset] * d_filter[filter_pos]; // Target Pixel is Not In Shared Memory } else { // Ensure target pixel global location is valid in_gl_row = blockIdx.y * blockDim.y + in_row; in_gl_col = blockIdx.x * blockDim.x + in_col; if (in_gl_row >= 0 && in_gl_row < num_rows && in_gl_col >= 0 && in_gl_col < num_cols){ // Get target blur pixel offset int global_offset = in_gl_row * num_cols + in_gl_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)d_in[global_offset] * d_filter[filter_pos]; } } // Always increment filter location filter_pos++; } } // Make sure all threads have finished computation __syncthreads(); // Store results in the correct location of the output array int result_offset = gl_row * num_cols + gl_col; d_out[result_offset] = (unsigned char)blur_sum; } } // gaussianBlurSharedv2: // Kernel that computes a gaussian blur over a single RGB channel. // This implementation in specific uses shared memory to reduce the // number of accesses to global memory to improve performance. // // *** Note: To use this approach, the block size and filter width // must be known before hand and set at the top of the document. 
__global__ void gaussianBlurSharedv2(unsigned char *d_in, unsigned char *d_out, const int num_rows, const int num_cols, float *d_filter){ // Given the filter width, determine the correct size of shared memory int blur_offset = ((FILTER_WIDTH-1)/2); // Create shared memory input array __shared__ unsigned char input_pixels[SHARED_MEM_SIZE * SHARED_MEM_SIZE]; // Get location of pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Get location of pixel in true block data int tr_sh_row = threadIdx.y; int tr_sh_col = threadIdx.x; // Get location of pixel in shared memory int off_sh_row = tr_sh_row + blur_offset; int off_sh_col = tr_sh_col + blur_offset; // Load shared memory with edge pixels loading extra pixels // Ensure working pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Each pixel loads in its own data from global memory int global_offset = gl_row * num_cols + gl_col; int shared_offset = off_sh_row * SHARED_MEM_SIZE + off_sh_col; input_pixels[shared_offset] = d_in[global_offset]; // Top Row Edge Pixels if (tr_sh_row == 0){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_row = gl_row - i; int cur_sh_row = off_sh_row - i; // Ensure target global pixel is valid if (cur_gl_row >= 0){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + off_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Bottom Row Edge Pixels if (tr_sh_row == blockDim.y-1){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_row = gl_row + i; int cur_sh_row = off_sh_row + i; // Ensure target global pixel is valid if (cur_gl_row < num_rows){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + off_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Left Column Edge Pixels if (tr_sh_col == 0){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_col = gl_col - i; int cur_sh_col = off_sh_col - i; // Ensure target global pixel is valid if (cur_gl_col >= 0){ // If valid, save pixel to shared memory global_offset = gl_row * num_cols + cur_gl_col; shared_offset = off_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Right Column Edge Pixels if (tr_sh_col == blockDim.x-1){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_col = gl_col + i; int cur_sh_col = off_sh_col + i; // Ensure target global pixel is valid if (cur_gl_col < num_cols){ // If valid, save pixel to shared memory global_offset = gl_row * num_cols + cur_gl_col; shared_offset = off_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Upper Left Corner Pixel if (tr_sh_row == 0 && tr_sh_col == 0){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row - i; int cur_gl_col = gl_col - j; int cur_sh_row = off_sh_row - i; int cur_sh_col = off_sh_col - j; // Ensure target global pixel is valid if (cur_gl_row >= 0 && cur_gl_col >= 0){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = 
d_in[global_offset]; } } } } // Upper Right Corner Pixel if (tr_sh_row == 0 && tr_sh_col == blockDim.x-1){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row - i; int cur_gl_col = gl_col + j; int cur_sh_row = off_sh_row - i; int cur_sh_col = off_sh_col + j; // Ensure target global pixel is valid if (cur_gl_row >= 0 && cur_gl_col < num_cols){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } } // Lower Left Corner Pixel if (tr_sh_row == blockDim.y-1 && tr_sh_col == 0){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row + i; int cur_gl_col = gl_col - j; int cur_sh_row = off_sh_row + i; int cur_sh_col = off_sh_col - j; // Ensure target global pixel is valid if (cur_gl_row < num_rows && cur_gl_col >= 0){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } } // Lower Right Corner Pixel if (tr_sh_row == blockDim.y-1 && tr_sh_col == blockDim.x-1){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row + i; int cur_gl_col = gl_col + j; int cur_sh_row = off_sh_row + i; int cur_sh_col = off_sh_col + j; // Ensure target global pixel is valid if (cur_gl_row < num_rows && cur_gl_col < num_cols){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } } // Make sure all threads have loaded before starting computation __syncthreads(); // Begin Calculations by Setting up Loop Variables int row_offset, col_offset; int in_sh_row, in_sh_col; int in_gl_row, in_gl_col; float blur_sum = 0; int filter_pos = 0; // Iterate from the furthest back offset shared row to the furthest forward offset shared row for (row_offset = - blur_offset; row_offset <= blur_offset; row_offset++){ // Iterate from the furthest back offset shared col to the furthest forward offset shared col for (col_offset = - blur_offset; col_offset <= blur_offset; col_offset++){ // Calculate global and shared offsets in_sh_row = off_sh_row + row_offset; in_sh_col = off_sh_col + col_offset; in_gl_row = gl_row + row_offset; in_gl_col = gl_col + col_offset; // Ensure target blur pixel location is valid if (in_gl_row < num_rows && in_gl_col < num_cols && in_gl_row >= 0 && in_gl_col >= 0){ // Get target blur pixel from shared memory shared_offset = in_sh_row * SHARED_MEM_SIZE + in_sh_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixels[shared_offset] * d_filter[filter_pos]; } // Always increment filter location filter_pos++; } } // Make sure all threads have finished computation __syncthreads(); // Store results in the correct location of the output array int result_offset = gl_row * num_cols + gl_col; d_out[result_offset] = (unsigned char)blur_sum; } } // gaussianBlurSepRow: // Kernel that computes a gaussian blur over a single RGB channel // but does this process uses shared memory and splits computations // by each row of the image and 
Filter. __global__ void gaussianBlurSepRow(unsigned char *d_in, float *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Create shared memory to hold the full row of values extern __shared__ unsigned char input_pixel_row[]; // Given the filter width, determine the correct col offsets int blur_offset = ((filterWidth-1)/2); // Determine the row this block is working on int gl_row = blockIdx.x; // Determine the filter row this block is working on int filter_row = blockIdx.y; // Determine thread id int thread_id = threadIdx.x; // Determine the number of threads working in each block int total_threads = blockDim.x; // Determine how many pixels of this row each thread should do int pixels_per_thread = ::ceil((float)num_cols/(float)total_threads); // Determine the target pixel for each thread by col offset int col_offset = thread_id * pixels_per_thread; // Ensure starting pixel location is valid if (col_offset < num_cols){ // Load shared memory for (int i = 0; i < pixels_per_thread; i++){ int pixel_col = col_offset + i; if (pixel_col < num_cols){ int global_offset = gl_row * num_cols + pixel_col; input_pixel_row[pixel_col] = d_in[global_offset]; } } } // Make sure all threads have loaded before starting computation __syncthreads(); // Ensure starting pixel location is valid if (col_offset < num_cols){ // Using shared memory, work over pixels per thread for (int i = 0; i < pixels_per_thread; i++){ // Determine target pixels location int pixel_col = col_offset + i; // Setup loop variables int in_col; float blur_sum = 0; int filter_pos = filter_row * filterWidth; if (pixel_col < num_cols){ // Iterate from the furthest back col to the furthest forward col around target pixel for (in_col = pixel_col - blur_offset; in_col <= pixel_col + blur_offset; in_col++){ // Ensure target blur pixel location is valid if (in_col >= 0 && in_col < num_cols){ // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixel_row[in_col] * d_filter[filter_pos]; } // Always increment filter location filter_pos++; } // Given the current working row, filter row, and blur_offset determine the correct result location int result_row = gl_row + (blur_offset - filter_row); // Store the sum in the correct location of the global results using an atomic Add if (result_row >= 0 && result_row < num_rows){ int result_offset = result_row * num_cols + pixel_col; atomicAdd(d_out + (result_offset), blur_sum); } } } } } // gaussianBlurSepCol: // Kernel that computes a gaussian blur over a single RGB channel // but does this process uses shared memory and splits computations // by each column of the image and Filter. 
__global__ void gaussianBlurSepCol(unsigned char *d_in, float *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Create shared memory to hold the full col of values extern __shared__ unsigned char input_pixel_col[]; // Given the filter width, determine the correct col offsets int blur_offset = ((filterWidth-1)/2); // Determine the col this block is working on int gl_col = blockIdx.x; // Determine the filter col this block is working on int filter_col = blockIdx.y; // Determine thread id int thread_id = threadIdx.x; // Determine the number of threads working in each block int total_threads = blockDim.x; // Determine how many pixels of this col each thread should do int pixels_per_thread = ::ceil((float)num_rows/(float)total_threads); // Determine the target pixel for each thread by row offset int row_offset = thread_id * pixels_per_thread; // Ensure starting pixel location is valid if (row_offset < num_rows){ // Load shared memory for (int i = 0; i < pixels_per_thread; i++){ int pixel_row = row_offset + i; if (pixel_row < num_rows){ int global_offset = pixel_row * num_cols + gl_col; input_pixel_col[pixel_row] = d_in[global_offset]; } } } // Make sure all threads have loaded before starting computation __syncthreads(); // Ensure starting pixel location is valid if (row_offset < num_rows){ // Using shared memory, work over pixels per thread for (int i = 0; i < pixels_per_thread; i++){ // Determine target pixels location int pixel_row = row_offset + i; // Setup loop variables int in_row; float blur_sum = 0; int filter_pos = filter_col; if (pixel_row < num_rows){ // Iterate from the furthest back row to the furthest forward row around target pixel for (in_row = pixel_row - blur_offset; in_row <= pixel_row + blur_offset; in_row++){ // Ensure target blur pixel location is valid if (in_row >= 0 && in_row < num_rows){ // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixel_col[in_row] * d_filter[filter_pos]; } // Always increment filter location filter_pos = filter_pos + filterWidth; } // Given the current working col, filter col, and blur_offset determine the correct result location int result_col = gl_col + (blur_offset - filter_col); // Store the sum in the correct location of the global results using an atomic Add if (result_col >= 0 && result_col < num_cols){ int result_offset = pixel_row * num_cols + result_col; atomicAdd(d_out + (result_offset), blur_sum); } } } } } __global__ void gaussianBlurSepCombiner(float *d_in, unsigned char *d_out, const int num_rows, const int num_cols){ // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Get pixel location int pixel_offset = gl_row * num_cols + gl_col; // Convert and store temp result in the correct global output d_out[pixel_offset] = (unsigned char)d_in[pixel_offset]; // Reset value to zero d_in[pixel_offset] = 0.0; } } // separateChannels: // Kernel that splits an RGBA uchar4 array into 3 seperate unsigned char // arrays. 
__global__ void separateChannels(uchar4 *d_imrgba, unsigned char *d_r, unsigned char *d_g, unsigned char *d_b, const int num_rows, const int num_cols){ // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Get pixel location int pixel_offset = gl_row * num_cols + gl_col; // Get corresponding rgba pixel at this location uchar4 rgba_pixel = d_imrgba[pixel_offset]; // Save each pixel element to correct array d_r[pixel_offset] = rgba_pixel.x; d_g[pixel_offset] = rgba_pixel.y; d_b[pixel_offset] = rgba_pixel.z; } } // recombineChannels: // Kernel that combines three given R,G,and B pixel value arrays into // a single uchar4 vector array. __global__ void recombineChannels(unsigned char *d_r, unsigned char *d_g, unsigned char *d_b, uchar4 *d_orgba, const int num_rows, const int num_cols){ // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Get pixel location int pixel_offset = gl_row * num_cols + gl_col; // Create uchar4 using three arrays d_orgba[pixel_offset] = make_uchar4(d_b[pixel_offset],d_g[pixel_offset],d_r[pixel_offset],255); } } void gaussianBlurKernelGlobal(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth){ // Set grid and block dimensions // For Global and Shared Memory Format dim3 grid(::ceil((float)num_cols/(float)BLOCK),::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Seperate out each channel into seperate arrays hipLaunchKernelGGL(( separateChannels), dim3(grid), dim3(block), 0, 0, d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the red pixel array hipLaunchKernelGGL(( gaussianBlurGlobal), dim3(grid), dim3(block), 0, 0, d_red, d_rblurred, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the green pixel array hipLaunchKernelGGL(( gaussianBlurGlobal), dim3(grid), dim3(block), 0, 0, d_green, d_gblurred, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the blue pixel array hipLaunchKernelGGL(( gaussianBlurGlobal), dim3(grid), dim3(block), 0, 0, d_blue, d_bblurred, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Recombine the blurred channels into a single uchar4 array hipLaunchKernelGGL(( recombineChannels), dim3(grid), dim3(block), 0, 0, d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } void gaussianBlurKernelSharedv1(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth){ // Set grid and block dimensions // For Global and Shared Memory Format dim3 
grid(::ceil((float)num_cols/(float)BLOCK),::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Seperate out each channel into seperate arrays hipLaunchKernelGGL(( separateChannels), dim3(grid), dim3(block), 0, 0, d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the red pixel array hipLaunchKernelGGL(( gaussianBlurSharedv1), dim3(grid), dim3(block), 0, 0, d_red, d_rblurred, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the green pixel array hipLaunchKernelGGL(( gaussianBlurSharedv1), dim3(grid), dim3(block), 0, 0, d_green, d_gblurred, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the blue pixel array hipLaunchKernelGGL(( gaussianBlurSharedv1), dim3(grid), dim3(block), 0, 0, d_blue, d_bblurred, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Recombine the blurred channels into a single uchar4 array hipLaunchKernelGGL(( recombineChannels), dim3(grid), dim3(block), 0, 0, d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } void gaussianBlurKernelSharedv2(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter){ // Set grid and block dimensions // For Global and Shared Memory Format dim3 grid(::ceil((float)num_cols/(float)BLOCK),::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Seperate out each channel into seperate arrays hipLaunchKernelGGL(( separateChannels), dim3(grid), dim3(block), 0, 0, d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the red pixel array hipLaunchKernelGGL(( gaussianBlurSharedv2), dim3(grid), dim3(block), 0, 0, d_red, d_rblurred, num_rows, num_cols, d_filter); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the green pixel array hipLaunchKernelGGL(( gaussianBlurSharedv2), dim3(grid), dim3(block), 0, 0, d_green, d_gblurred, num_rows, num_cols, d_filter); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the blue pixel array hipLaunchKernelGGL(( gaussianBlurSharedv2), dim3(grid), dim3(block), 0, 0, d_blue, d_bblurred, num_rows, num_cols, d_filter); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Recombine the blurred channels into a single uchar4 array hipLaunchKernelGGL(( recombineChannels), dim3(grid), dim3(block), 0, 0, d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } void gaussianBlurKernelSharedSepRow(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth, float *tmp_pixels){ // Set grid and block dimensions for seperating and recombining dim3 grid(::ceil((float)num_cols/(float)BLOCK),::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Set grid and block dimensions for 
seperable row gaussian kernel dim3 gridSep(num_rows,filterWidth,1); dim3 blockSep(BLOCK*BLOCK, 1, 1); // Determine amount of shared memory needed to hold full rows for each kernel call size_t shared_memory_size = num_cols * sizeof(unsigned char); // Seperate out each channel into seperate arrays hipLaunchKernelGGL(( separateChannels), dim3(grid), dim3(block), 0, 0, d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the red pixel array hipLaunchKernelGGL(( gaussianBlurSepRow), dim3(gridSep), dim3(blockSep), shared_memory_size, 0, d_red, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Convert red pixel results to unsigned chars and reset temp array hipLaunchKernelGGL(( gaussianBlurSepCombiner), dim3(grid), dim3(block), 0, 0, tmp_pixels, d_rblurred, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the green pixel array hipLaunchKernelGGL(( gaussianBlurSepRow), dim3(gridSep), dim3(blockSep), shared_memory_size, 0, d_green, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Convert green pixel results to unsigned chars and reset temp array hipLaunchKernelGGL(( gaussianBlurSepCombiner), dim3(grid), dim3(block), 0, 0, tmp_pixels, d_gblurred, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the blue pixel array hipLaunchKernelGGL(( gaussianBlurSepRow), dim3(gridSep), dim3(blockSep), shared_memory_size, 0, d_blue, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Convert blue pixel results to unsigned chars and reset temp array hipLaunchKernelGGL(( gaussianBlurSepCombiner), dim3(grid), dim3(block), 0, 0, tmp_pixels, d_bblurred, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Recombine the blurred channels into a single uchar4 array hipLaunchKernelGGL(( recombineChannels), dim3(grid), dim3(block), 0, 0, d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } void gaussianBlurKernelSharedSepCol(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth, float *tmp_pixels){ // Set grid and block dimensions for seperating and recombining dim3 grid(::ceil((float)num_cols/(float)BLOCK),::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Set grid and block dimensions for seperable row gaussian kernel dim3 gridSep(num_cols,filterWidth,1); dim3 blockSep(BLOCK*BLOCK, 1, 1); // Determine amount of shared memory needed to hold full rows for each kernel call size_t shared_memory_size = num_rows * sizeof(unsigned char); // Seperate out each channel into seperate arrays hipLaunchKernelGGL(( separateChannels), dim3(grid), dim3(block), 0, 0, d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the red pixel array hipLaunchKernelGGL(( gaussianBlurSepCol), dim3(gridSep), dim3(blockSep), shared_memory_size, 0, d_red, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); 
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Convert red pixel results to unsigned chars and reset temp array hipLaunchKernelGGL(( gaussianBlurSepCombiner), dim3(grid), dim3(block), 0, 0, tmp_pixels, d_rblurred, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the green pixel array hipLaunchKernelGGL(( gaussianBlurSepCol), dim3(gridSep), dim3(blockSep), shared_memory_size, 0, d_green, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Convert green pixel results to unsigned chars and reset temp array hipLaunchKernelGGL(( gaussianBlurSepCombiner), dim3(grid), dim3(block), 0, 0, tmp_pixels, d_gblurred, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Compute Gaussian Blur for the blue pixel array hipLaunchKernelGGL(( gaussianBlurSepCol), dim3(gridSep), dim3(blockSep), shared_memory_size, 0, d_blue, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Convert blue pixel results to unsigned chars and reset temp array hipLaunchKernelGGL(( gaussianBlurSepCombiner), dim3(grid), dim3(block), 0, 0, tmp_pixels, d_bblurred, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Recombine the blurred channels into a single uchar4 array hipLaunchKernelGGL(( recombineChannels), dim3(grid), dim3(block), 0, 0, d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
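// A minimal host-side sketch of driving gaussianBlurKernelGlobal() above.
// num_rows, num_cols, filterWidth and the host buffers h_img, h_filter, h_out are
// assumed to be provided by the caller; all names here are illustrative and this
// fragment is not part of the original file.
size_t n_px = num_rows * num_cols;
uchar4 *d_img, *d_out;
unsigned char *d_r, *d_g, *d_b, *d_rb, *d_gb, *d_bb;
float *d_flt;
hipMalloc(&d_img, n_px * sizeof(uchar4));
hipMalloc(&d_out, n_px * sizeof(uchar4));
hipMalloc(&d_r,  n_px);  hipMalloc(&d_g,  n_px);  hipMalloc(&d_b,  n_px);
hipMalloc(&d_rb, n_px);  hipMalloc(&d_gb, n_px);  hipMalloc(&d_bb, n_px);
hipMalloc(&d_flt, filterWidth * filterWidth * sizeof(float));
hipMemcpy(d_img, h_img, n_px * sizeof(uchar4), hipMemcpyHostToDevice);
hipMemcpy(d_flt, h_filter, filterWidth * filterWidth * sizeof(float), hipMemcpyHostToDevice);
gaussianBlurKernelGlobal(d_img, d_out, num_rows, num_cols,
                         d_r, d_g, d_b, d_rb, d_gb, d_bb,
                         d_flt, filterWidth);
hipMemcpy(h_out, d_out, n_px * sizeof(uchar4), hipMemcpyDeviceToHost);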
efa92e9b9285554d2a3fc917c37377727983eeac.cu
#include "./gaussian_kernel.h" #define BLOCK 32 #define FILTER_WIDTH 9 // Shared Memory = BLOCK + FILTER_WIDTH - 1 #define SHARED_MEM_SIZE 40 // gaussianBlurGlobal: // Kernel that computes a gaussian blur over a single RGB channel. // This implementation in specific does not use shared memory to // improve performance. __global__ void gaussianBlurGlobal(unsigned char *d_in, unsigned char *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Initialize row and column operators int in_row, in_col; // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Given the filter width, determine the correct row and col offsets int blur_offset = ((filterWidth-1)/2); // Setup loop variables float blur_sum = 0; int filter_pos = 0; // Iterate from the furthest back row to the furthest forward row for (in_row = gl_row - blur_offset; in_row <= gl_row + blur_offset; in_row++){ // Iterate from the furthest back col to the furthest forward col for (in_col = gl_col - blur_offset; in_col <= gl_col + blur_offset; in_col++){ // Ensure target blur pixel location is valid if (in_row >= 0 && in_row < num_rows && in_col >= 0 && in_col < num_cols){ // Get target blur pixel offset int pixel_offset = in_row * num_cols + in_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)d_in[pixel_offset] * d_filter[filter_pos]; } // Always increment filter location filter_pos++; } } // Store results in the correct location of the output array int result_offset = gl_row * num_cols + gl_col; d_out[result_offset] = (unsigned char)blur_sum; } } // gaussianBlurSharedv1: // Kernel that computes a gaussian blur over a single RGB channel. // This implementation in specific uses shared memory to reduce the // number of accesses to global memory to improve performance. 
__global__ void gaussianBlurSharedv1(unsigned char *d_in, unsigned char *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Create shared memory input and array __shared__ unsigned char input_pixels[BLOCK*BLOCK]; // Get location in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Get location in shared memory int sh_row = threadIdx.y; int sh_col = threadIdx.x; // Ensure target working pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Load shared memory values for all interal pixels from global memory int global_offset = gl_row * num_cols + gl_col; int shared_offset = sh_row * blockDim.x + sh_col; input_pixels[shared_offset] = d_in[global_offset]; // Make sure all threads have loaded before starting computation __syncthreads(); // Setup loop variables int in_row, in_col; int in_gl_row, in_gl_col; float blur_sum = 0; int filter_pos = 0; // Given the filter width, determine the correct row and col offsets int blur_offset = ((filterWidth-1)/2); // Iterate from the furthest back row to the furthest forward row for (in_row = sh_row - blur_offset; in_row <= sh_row + blur_offset; in_row++){ // Iterate from the furthest back col to the furthest forward col for (in_col = sh_col - blur_offset; in_col <= sh_col + blur_offset; in_col++){ // Target Pixel is In Shared Memory if (in_row >= 0 && in_row < blockDim.y && in_col >= 0 && in_col < blockDim.x){ // Get target blur pixel offset int shared_offset = in_row * blockDim.x + in_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixels[shared_offset] * d_filter[filter_pos]; // Target Pixel is Not In Shared Memory } else { // Ensure target pixel global location is valid in_gl_row = blockIdx.y * blockDim.y + in_row; in_gl_col = blockIdx.x * blockDim.x + in_col; if (in_gl_row >= 0 && in_gl_row < num_rows && in_gl_col >= 0 && in_gl_col < num_cols){ // Get target blur pixel offset int global_offset = in_gl_row * num_cols + in_gl_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)d_in[global_offset] * d_filter[filter_pos]; } } // Always increment filter location filter_pos++; } } // Make sure all threads have finished computation __syncthreads(); // Store results in the correct location of the output array int result_offset = gl_row * num_cols + gl_col; d_out[result_offset] = (unsigned char)blur_sum; } } // gaussianBlurSharedv2: // Kernel that computes a gaussian blur over a single RGB channel. // This implementation in specific uses shared memory to reduce the // number of accesses to global memory to improve performance. // // *** Note: To use this approach, the block size and filter width // must be known before hand and set at the top of the document. 
__global__ void gaussianBlurSharedv2(unsigned char *d_in, unsigned char *d_out, const int num_rows, const int num_cols, float *d_filter){ // Given the filter width, determine the correct size of shared memory int blur_offset = ((FILTER_WIDTH-1)/2); // Create shared memory input array __shared__ unsigned char input_pixels[SHARED_MEM_SIZE * SHARED_MEM_SIZE]; // Get location of pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Get location of pixel in true block data int tr_sh_row = threadIdx.y; int tr_sh_col = threadIdx.x; // Get location of pixel in shared memory int off_sh_row = tr_sh_row + blur_offset; int off_sh_col = tr_sh_col + blur_offset; // Load shared memory with edge pixels loading extra pixels // Ensure working pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Each pixel loads in its own data from global memory int global_offset = gl_row * num_cols + gl_col; int shared_offset = off_sh_row * SHARED_MEM_SIZE + off_sh_col; input_pixels[shared_offset] = d_in[global_offset]; // Top Row Edge Pixels if (tr_sh_row == 0){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_row = gl_row - i; int cur_sh_row = off_sh_row - i; // Ensure target global pixel is valid if (cur_gl_row >= 0){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + off_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Bottom Row Edge Pixels if (tr_sh_row == blockDim.y-1){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_row = gl_row + i; int cur_sh_row = off_sh_row + i; // Ensure target global pixel is valid if (cur_gl_row < num_rows){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + off_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Left Column Edge Pixels if (tr_sh_col == 0){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_col = gl_col - i; int cur_sh_col = off_sh_col - i; // Ensure target global pixel is valid if (cur_gl_col >= 0){ // If valid, save pixel to shared memory global_offset = gl_row * num_cols + cur_gl_col; shared_offset = off_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Right Column Edge Pixels if (tr_sh_col == blockDim.x-1){ // Load in pixels above equal to blur_offset for (int i = 1; i <= blur_offset; i++){ int cur_gl_col = gl_col + i; int cur_sh_col = off_sh_col + i; // Ensure target global pixel is valid if (cur_gl_col < num_cols){ // If valid, save pixel to shared memory global_offset = gl_row * num_cols + cur_gl_col; shared_offset = off_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } // Upper Left Corner Pixel if (tr_sh_row == 0 && tr_sh_col == 0){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row - i; int cur_gl_col = gl_col - j; int cur_sh_row = off_sh_row - i; int cur_sh_col = off_sh_col - j; // Ensure target global pixel is valid if (cur_gl_row >= 0 && cur_gl_col >= 0){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = 
d_in[global_offset]; } } } } // Upper Right Corner Pixel if (tr_sh_row == 0 && tr_sh_col == blockDim.x-1){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row - i; int cur_gl_col = gl_col + j; int cur_sh_row = off_sh_row - i; int cur_sh_col = off_sh_col + j; // Ensure target global pixel is valid if (cur_gl_row >= 0 && cur_gl_col < num_cols){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } } // Lower Left Corner Pixel if (tr_sh_row == blockDim.y-1 && tr_sh_col == 0){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row + i; int cur_gl_col = gl_col - j; int cur_sh_row = off_sh_row + i; int cur_sh_col = off_sh_col - j; // Ensure target global pixel is valid if (cur_gl_row < num_rows && cur_gl_col >= 0){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } } // Lower Right Corner Pixel if (tr_sh_row == blockDim.y-1 && tr_sh_col == blockDim.x-1){ // Load in pixels diagonal equal to blur_offset for (int i = 1; i <= blur_offset; i++){ for (int j = 1; j <= blur_offset; j++){ int cur_gl_row = gl_row + i; int cur_gl_col = gl_col + j; int cur_sh_row = off_sh_row + i; int cur_sh_col = off_sh_col + j; // Ensure target global pixel is valid if (cur_gl_row < num_rows && cur_gl_col < num_cols){ // If valid, save pixel to shared memory global_offset = cur_gl_row * num_cols + cur_gl_col; shared_offset = cur_sh_row * SHARED_MEM_SIZE + cur_sh_col; input_pixels[shared_offset] = d_in[global_offset]; } } } } // Make sure all threads have loaded before starting computation __syncthreads(); // Begin Calculations by Setting up Loop Variables int row_offset, col_offset; int in_sh_row, in_sh_col; int in_gl_row, in_gl_col; float blur_sum = 0; int filter_pos = 0; // Iterate from the furthest back offset shared row to the furthest forward offset shared row for (row_offset = - blur_offset; row_offset <= blur_offset; row_offset++){ // Iterate from the furthest back offset shared col to the furthest forward offset shared col for (col_offset = - blur_offset; col_offset <= blur_offset; col_offset++){ // Calculate global and shared offsets in_sh_row = off_sh_row + row_offset; in_sh_col = off_sh_col + col_offset; in_gl_row = gl_row + row_offset; in_gl_col = gl_col + col_offset; // Ensure target blur pixel location is valid if (in_gl_row < num_rows && in_gl_col < num_cols && in_gl_row >= 0 && in_gl_col >= 0){ // Get target blur pixel from shared memory shared_offset = in_sh_row * SHARED_MEM_SIZE + in_sh_col; // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixels[shared_offset] * d_filter[filter_pos]; } // Always increment filter location filter_pos++; } } // Make sure all threads have finished computation __syncthreads(); // Store results in the correct location of the output array int result_offset = gl_row * num_cols + gl_col; d_out[result_offset] = (unsigned char)blur_sum; } } // gaussianBlurSepRow: // Kernel that computes a gaussian blur over a single RGB channel // but does this process uses shared memory and splits computations // by each row of the image and 
Filter. __global__ void gaussianBlurSepRow(unsigned char *d_in, float *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Create shared memory to hold the full row of values extern __shared__ unsigned char input_pixel_row[]; // Given the filter width, determine the correct col offsets int blur_offset = ((filterWidth-1)/2); // Determine the row this block is working on int gl_row = blockIdx.x; // Determine the filter row this block is working on int filter_row = blockIdx.y; // Determine thread id int thread_id = threadIdx.x; // Determine the number of threads working in each block int total_threads = blockDim.x; // Determine how many pixels of this row each thread should do int pixels_per_thread = std::ceil((float)num_cols/(float)total_threads); // Determine the target pixel for each thread by col offset int col_offset = thread_id * pixels_per_thread; // Ensure starting pixel location is valid if (col_offset < num_cols){ // Load shared memory for (int i = 0; i < pixels_per_thread; i++){ int pixel_col = col_offset + i; if (pixel_col < num_cols){ int global_offset = gl_row * num_cols + pixel_col; input_pixel_row[pixel_col] = d_in[global_offset]; } } } // Make sure all threads have loaded before starting computation __syncthreads(); // Ensure starting pixel location is valid if (col_offset < num_cols){ // Using shared memory, work over pixels per thread for (int i = 0; i < pixels_per_thread; i++){ // Determine target pixels location int pixel_col = col_offset + i; // Setup loop variables int in_col; float blur_sum = 0; int filter_pos = filter_row * filterWidth; if (pixel_col < num_cols){ // Iterate from the furthest back col to the furthest forward col around target pixel for (in_col = pixel_col - blur_offset; in_col <= pixel_col + blur_offset; in_col++){ // Ensure target blur pixel location is valid if (in_col >= 0 && in_col < num_cols){ // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixel_row[in_col] * d_filter[filter_pos]; } // Always increment filter location filter_pos++; } // Given the current working row, filter row, and blur_offset determine the correct result location int result_row = gl_row + (blur_offset - filter_row); // Store the sum in the correct location of the global results using an atomic Add if (result_row >= 0 && result_row < num_rows){ int result_offset = result_row * num_cols + pixel_col; atomicAdd(d_out + (result_offset), blur_sum); } } } } } // gaussianBlurSepCol: // Kernel that computes a gaussian blur over a single RGB channel // but does this process uses shared memory and splits computations // by each column of the image and Filter. 
__global__ void gaussianBlurSepCol(unsigned char *d_in, float *d_out, const int num_rows, const int num_cols, float *d_filter, const int filterWidth){ // Create shared memory to hold the full col of values extern __shared__ unsigned char input_pixel_col[]; // Given the filter width, determine the correct col offsets int blur_offset = ((filterWidth-1)/2); // Determine the col this block is working on int gl_col = blockIdx.x; // Determine the filter col this block is working on int filter_col = blockIdx.y; // Determine thread id int thread_id = threadIdx.x; // Determine the number of threads working in each block int total_threads = blockDim.x; // Determine how many pixels of this col each thread should do int pixels_per_thread = std::ceil((float)num_rows/(float)total_threads); // Determine the target pixel for each thread by row offset int row_offset = thread_id * pixels_per_thread; // Ensure starting pixel location is valid if (row_offset < num_rows){ // Load shared memory for (int i = 0; i < pixels_per_thread; i++){ int pixel_row = row_offset + i; if (pixel_row < num_rows){ int global_offset = pixel_row * num_cols + gl_col; input_pixel_col[pixel_row] = d_in[global_offset]; } } } // Make sure all threads have loaded before starting computation __syncthreads(); // Ensure starting pixel location is valid if (row_offset < num_rows){ // Using shared memory, work over pixels per thread for (int i = 0; i < pixels_per_thread; i++){ // Determine target pixels location int pixel_row = row_offset + i; // Setup loop variables int in_row; float blur_sum = 0; int filter_pos = filter_col; if (pixel_row < num_rows){ // Iterate from the furthest back row to the furthest forward row around target pixel for (in_row = pixel_row - blur_offset; in_row <= pixel_row + blur_offset; in_row++){ // Ensure target blur pixel location is valid if (in_row >= 0 && in_row < num_rows){ // Multiply current filter location by target pixel and add to running sum blur_sum += (float)input_pixel_col[in_row] * d_filter[filter_pos]; } // Always increment filter location filter_pos = filter_pos + filterWidth; } // Given the current working col, filter col, and blur_offset determine the correct result location int result_col = gl_col + (blur_offset - filter_col); // Store the sum in the correct location of the global results using an atomic Add if (result_col >= 0 && result_col < num_cols){ int result_offset = pixel_row * num_cols + result_col; atomicAdd(d_out + (result_offset), blur_sum); } } } } } __global__ void gaussianBlurSepCombiner(float *d_in, unsigned char *d_out, const int num_rows, const int num_cols){ // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Get pixel location int pixel_offset = gl_row * num_cols + gl_col; // Convert and store temp result in the correct global output d_out[pixel_offset] = (unsigned char)d_in[pixel_offset]; // Reset value to zero d_in[pixel_offset] = 0.0; } } // separateChannels: // Kernel that splits an RGBA uchar4 array into 3 seperate unsigned char // arrays. 
__global__ void separateChannels(uchar4 *d_imrgba, unsigned char *d_r, unsigned char *d_g, unsigned char *d_b, const int num_rows, const int num_cols){ // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Get pixel location int pixel_offset = gl_row * num_cols + gl_col; // Get corresponding rgba pixel at this location uchar4 rgba_pixel = d_imrgba[pixel_offset]; // Save each pixel element to correct array d_r[pixel_offset] = rgba_pixel.x; d_g[pixel_offset] = rgba_pixel.y; d_b[pixel_offset] = rgba_pixel.z; } } // recombineChannels: // Kernel that combines three given R,G,and B pixel value arrays into // a single uchar4 vector array. __global__ void recombineChannels(unsigned char *d_r, unsigned char *d_g, unsigned char *d_b, uchar4 *d_orgba, const int num_rows, const int num_cols){ // Determine location of target pixel in global memory int gl_row = blockIdx.y * blockDim.y + threadIdx.y; int gl_col = blockIdx.x * blockDim.x + threadIdx.x; // Ensure the target pixel is valid if (gl_col < num_cols && gl_row < num_rows){ // Get pixel location int pixel_offset = gl_row * num_cols + gl_col; // Create uchar4 using three arrays d_orgba[pixel_offset] = make_uchar4(d_b[pixel_offset],d_g[pixel_offset],d_r[pixel_offset],255); } } void gaussianBlurKernelGlobal(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth){ // Set grid and block dimensions // For Global and Shared Memory Format dim3 grid(std::ceil((float)num_cols/(float)BLOCK),std::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Seperate out each channel into seperate arrays separateChannels<<<grid, block>>>(d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the red pixel array gaussianBlurGlobal<<<grid, block>>>(d_red, d_rblurred, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the green pixel array gaussianBlurGlobal<<<grid, block>>>(d_green, d_gblurred, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the blue pixel array gaussianBlurGlobal<<<grid, block>>>(d_blue, d_bblurred, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Recombine the blurred channels into a single uchar4 array recombineChannels<<<grid, block>>>(d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void gaussianBlurKernelSharedv1(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth){ // Set grid and block dimensions // For Global and Shared Memory Format dim3 grid(std::ceil((float)num_cols/(float)BLOCK),std::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Seperate out each channel into seperate arrays separateChannels<<<grid, block>>>(d_imrgba, 
d_red, d_green, d_blue, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the red pixel array gaussianBlurSharedv1<<<grid, block>>>(d_red, d_rblurred, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the green pixel array gaussianBlurSharedv1<<<grid, block>>>(d_green, d_gblurred, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the blue pixel array gaussianBlurSharedv1<<<grid, block>>>(d_blue, d_bblurred, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Recombine the blurred channels into a single uchar4 array recombineChannels<<<grid, block>>>(d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void gaussianBlurKernelSharedv2(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter){ // Set grid and block dimensions // For Global and Shared Memory Format dim3 grid(std::ceil((float)num_cols/(float)BLOCK),std::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Seperate out each channel into seperate arrays separateChannels<<<grid, block>>>(d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the red pixel array gaussianBlurSharedv2<<<grid, block>>>(d_red, d_rblurred, num_rows, num_cols, d_filter); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the green pixel array gaussianBlurSharedv2<<<grid, block>>>(d_green, d_gblurred, num_rows, num_cols, d_filter); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the blue pixel array gaussianBlurSharedv2<<<grid, block>>>(d_blue, d_bblurred, num_rows, num_cols, d_filter); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Recombine the blurred channels into a single uchar4 array recombineChannels<<<grid, block>>>(d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void gaussianBlurKernelSharedSepRow(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth, float *tmp_pixels){ // Set grid and block dimensions for seperating and recombining dim3 grid(std::ceil((float)num_cols/(float)BLOCK),std::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Set grid and block dimensions for seperable row gaussian kernel dim3 gridSep(num_rows,filterWidth,1); dim3 blockSep(BLOCK*BLOCK, 1, 1); // Determine amount of shared memory needed to hold full rows for each kernel call size_t shared_memory_size = num_cols * sizeof(unsigned char); // Seperate out each channel into seperate arrays separateChannels<<<grid, block>>>(d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the red pixel array gaussianBlurSepRow<<<gridSep, blockSep, 
shared_memory_size>>>(d_red, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Convert red pixel results to unsigned chars and reset temp array gaussianBlurSepCombiner<<<grid, block>>>(tmp_pixels, d_rblurred, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the green pixel array gaussianBlurSepRow<<<gridSep, blockSep, shared_memory_size>>>(d_green, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Convert green pixel results to unsigned chars and reset temp array gaussianBlurSepCombiner<<<grid, block>>>(tmp_pixels, d_gblurred, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the blue pixel array gaussianBlurSepRow<<<gridSep, blockSep, shared_memory_size>>>(d_blue, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Convert blue pixel results to unsigned chars and reset temp array gaussianBlurSepCombiner<<<grid, block>>>(tmp_pixels, d_bblurred, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Recombine the blurred channels into a single uchar4 array recombineChannels<<<grid, block>>>(d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void gaussianBlurKernelSharedSepCol(uchar4* d_imrgba, uchar4 *d_oimrgba, size_t num_rows, size_t num_cols, unsigned char *d_red, unsigned char *d_green, unsigned char *d_blue, unsigned char *d_rblurred, unsigned char *d_gblurred, unsigned char *d_bblurred, float *d_filter, int filterWidth, float *tmp_pixels){ // Set grid and block dimensions for seperating and recombining dim3 grid(std::ceil((float)num_cols/(float)BLOCK),std::ceil((float)num_rows/(float)BLOCK),1); dim3 block(BLOCK, BLOCK, 1); // Set grid and block dimensions for seperable row gaussian kernel dim3 gridSep(num_cols,filterWidth,1); dim3 blockSep(BLOCK*BLOCK, 1, 1); // Determine amount of shared memory needed to hold full rows for each kernel call size_t shared_memory_size = num_rows * sizeof(unsigned char); // Seperate out each channel into seperate arrays separateChannels<<<grid, block>>>(d_imrgba, d_red, d_green, d_blue, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the red pixel array gaussianBlurSepCol<<<gridSep, blockSep, shared_memory_size>>>(d_red, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Convert red pixel results to unsigned chars and reset temp array gaussianBlurSepCombiner<<<grid, block>>>(tmp_pixels, d_rblurred, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the green pixel array gaussianBlurSepCol<<<gridSep, blockSep, shared_memory_size>>>(d_green, tmp_pixels, num_rows, num_cols, d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Convert green pixel results to unsigned chars and reset temp array gaussianBlurSepCombiner<<<grid, block>>>(tmp_pixels, d_gblurred, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Compute Gaussian Blur for the blue pixel array gaussianBlurSepCol<<<gridSep, blockSep, shared_memory_size>>>(d_blue, tmp_pixels, num_rows, num_cols, 
d_filter, filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Convert blue pixel results to unsigned chars and reset temp array gaussianBlurSepCombiner<<<grid, block>>>(tmp_pixels, d_bblurred, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Recombine the blurred channels into a single uchar4 array recombineChannels<<<grid, block>>>(d_rblurred, d_gblurred, d_bblurred, d_oimrgba, num_rows, num_cols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
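The launchers above take a device-resident filter but never show how it is built; the sketch below is one hedged way a caller might construct and upload a normalized Gaussian filter before invoking gaussianBlurKernelGlobal. The helper name, the filter width, and sigma are illustrative assumptions, not part of the original project.

// Sketch: build a normalized 2D Gaussian filter on the host and copy it to the
// device. filterWidth and sigma are illustrative; the returned pointer can be
// passed as d_filter to the launchers above.
#include <cuda_runtime.h>
#include <cmath>
#include <vector>

float *makeDeviceGaussianFilter(int filterWidth, float sigma) {
    std::vector<float> h_filter(filterWidth * filterWidth);
    float sum = 0.f;
    int half = filterWidth / 2;
    for (int r = -half; r <= half; r++) {
        for (int c = -half; c <= half; c++) {
            float v = expf(-(float)(r * r + c * c) / (2.f * sigma * sigma));
            h_filter[(r + half) * filterWidth + (c + half)] = v;
            sum += v;
        }
    }
    // Normalize so the filter weights sum to 1 and the blur preserves brightness.
    for (size_t i = 0; i < h_filter.size(); i++)
        h_filter[i] /= sum;

    float *d_filter = nullptr;
    cudaMalloc((void **)&d_filter, h_filter.size() * sizeof(float));
    cudaMemcpy(d_filter, h_filter.data(), h_filter.size() * sizeof(float),
               cudaMemcpyHostToDevice);
    return d_filter;
}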
eaf29298c492b56c6c4dc1cae144dbb04944faf8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/execution_policy.h> #include "common.h" #include "bn.h" /* * Device functions and data structures */ struct Float2 { float v1, v2; __device__ Float2() {} __device__ Float2(float _v1, float _v2) : v1(_v1), v2(_v2) {} __device__ Float2(float v) : v1(v), v2(v) {} __device__ Float2(int v) : v1(v), v2(v) {} __device__ Float2 &operator+=(const Float2 &a) { v1 += a.v1; v2 += a.v2; return *this; } }; struct SumOp { __device__ SumOp(const float *t, int c, int s) : tensor(t), C(c), S(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return tensor[(batch * C + plane) * S + n]; } const float *tensor; const int C; const int S; }; struct VarOp { __device__ VarOp(float m, const float *t, int c, int s) : mean(m), tensor(t), C(c), S(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { float val = tensor[(batch * C + plane) * S + n]; return (val - mean) * (val - mean); } const float mean; const float *tensor; const int C; const int S; }; struct GradOp { __device__ GradOp(float _gamma, float _beta, const float *_z, const float *_dz, int c, int s) : gamma(_gamma), beta(_beta), z(_z), dz(_dz), C(c), S(s) {} __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; float _dz = dz[(batch * C + plane) * S + n]; return Float2(_dz, _y * _dz); } const float gamma; const float beta; const float *z; const float *dz; const int C; const int S; }; static __device__ __forceinline__ float warpSum(float val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } #else __shared__ float values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } static __device__ __forceinline__ Float2 warpSum(Float2 value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } template <typename T, typename Op> __device__ T reduce(Op op, int plane, int N, int C, int S) { T sum = (T)0; for (int batch = 0; batch < N; ++batch) { for (int x = threadIdx.x; x < S; x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } /* * Kernels */ __global__ void mean_var_kernel(const float *x, float *mean, float *var, int N, int C, int S) { int plane = blockIdx.x; float norm = 1.f / (N * S); float _mean = reduce<float, SumOp>(SumOp(x, C, S), plane, N, C, S) * norm; __syncthreads(); float _var = reduce<float, VarOp>(VarOp(_mean, x, C, S), plane, N, C, S) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } __global__ void forward_kernel(const float *x, const float *mean, const float 
*var, const float *weight, const float *bias, float *y, float *z, float eps, int N, int C, int S) { int plane = blockIdx.x; float _mean = mean[plane]; float _var = var[plane]; float invStd = 0; if (_var != 0.f || eps != 0.f) { invStd = 1 / sqrt(_var + eps); } float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; for (int batch = 0; batch < N; ++batch) { for (int n = threadIdx.x; n < S; n += blockDim.x) { float _x = x[(batch * C + plane) * S + n]; float _y = (_x - _mean) * invStd; float _z = _y * gamma + beta; y[(batch * C + plane) * S + n] = _y; z[(batch * C + plane) * S + n] = _z; } } } __global__ void edz_eydz_kernel(const float *z, const float *dz, const float *weight, const float *bias, float *edz, float *eydz, float eps, int N, int C, int S) { int plane = blockIdx.x; float norm = 1.f / (N * S); float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; Float2 res = reduce<Float2, GradOp>(GradOp(gamma, beta, z, dz, C, S), plane, N, C, S); float _edz = res.v1 * norm; float _eydz = res.v2 * norm; __syncthreads(); if (threadIdx.x == 0) { edz[plane] = _edz; eydz[plane] = _eydz; } } __global__ void backward_kernel(const float *dz, const float *z, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, int N, int C, int S) { int plane = blockIdx.x; float _edz = edz[plane]; float _eydz = eydz[plane]; float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; if (dx != 0) { float _var = var[plane]; float invStd = 0; if (_var != 0.f || eps != 0.f) { invStd = 1 / sqrt(_var + eps); } float mul = gamma * invStd; for (int batch = 0; batch < N; ++batch) { for (int n = threadIdx.x; n < S; n += blockDim.x) { float _dz = dz[(batch * C + plane) * S + n]; float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; dx[(batch * C + plane) * S + n] = (_dz - _edz - _y * _eydz) * mul; } } } if (dweight != 0 || dbias != 0) { float norm = N * S; if (dweight != 0) { if (threadIdx.x == 0) { dweight[plane] += _eydz * norm; } } if (dbias != 0) { if (threadIdx.x == 0) { dbias[plane] += _edz * norm; } } } } /* * Implementations */ extern "C" int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, float *var, hipStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); hipLaunchKernelGGL(( mean_var_kernel), dim3(blocks), dim3(threads), 0, stream, x, mean, var, N, C, S); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _bn_forward_cuda(int N, int C, int S, const float *x, const float *mean, const float *var, const float *weight, const float *bias, float *y, float *z, float eps, hipStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); hipLaunchKernelGGL(( forward_kernel), dim3(blocks), dim3(threads), 0, stream, x, mean, var, weight, bias, y, z, eps, N, C, S); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _bn_edz_eydz_cuda(int N, int C, int S, const float *z, const float *dz, const float *weight, const float *bias, float *edz, float *eydz, float eps, hipStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); hipLaunchKernelGGL(( edz_eydz_kernel), dim3(blocks), dim3(threads), 0, stream, z, dz, weight, bias, edz, eydz, eps, N, C, S); // Check for errors hipError_t err = hipGetLastError(); if 
(err != hipSuccess) return 0; else return 1; } extern "C" int _bn_backward_cuda(int N, int C, int S, const float *dz, const float *z, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, hipStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); hipLaunchKernelGGL(( backward_kernel), dim3(blocks), dim3(threads), 0, stream, dz, z, var, weight, bias, edz, eydz, dx, dweight, dbias, eps, N, C, S); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _leaky_relu_cuda(int N, float *x, float slope, hipStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::hip::par.on(stream), th_x, th_x + N, th_x, [slope] __device__ (const float& x) { return x * slope; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _leaky_relu_backward_cuda(int N, const float *x, float *dx, float slope, hipStream_t stream) { // Run using thrust thrust::device_ptr<const float> th_x = thrust::device_pointer_cast(x); thrust::device_ptr<float> th_dx = thrust::device_pointer_cast(dx); thrust::transform_if(thrust::hip::par.on(stream), th_dx, th_dx + N, th_x, th_dx, [slope] __device__ (const float& dx) { return dx * slope; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _elu_cuda(int N, float *x, hipStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::hip::par.on(stream), th_x, th_x + N, th_x, [] __device__ (const float& x) { return exp(x) - 1.f; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _elu_backward_cuda(int N, const float *x, float *dx, hipStream_t stream) { // Run using thrust thrust::device_ptr<const float> th_x = thrust::device_pointer_cast(x); thrust::device_ptr<float> th_dx = thrust::device_pointer_cast(dx); thrust::transform_if(thrust::hip::par.on(stream), th_dx, th_dx + N, th_x, th_x, th_dx, [] __device__ (const float& dx, const float& x) { return dx * (x + 1.f); }, [] __device__ (const float& x) { return x < 0; }); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; } extern "C" int _elu_inv_cuda(int N, float *x, hipStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::hip::par.on(stream), th_x, th_x + N, th_x, [] __device__ (const float& x) { return log1p(x); }, [] __device__ (const float& x) { return x < 0; }); // Check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) return 0; else return 1; }
eaf29298c492b56c6c4dc1cae144dbb04944faf8.cu
#include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/execution_policy.h> #include "common.h" #include "bn.h" /* * Device functions and data structures */ struct Float2 { float v1, v2; __device__ Float2() {} __device__ Float2(float _v1, float _v2) : v1(_v1), v2(_v2) {} __device__ Float2(float v) : v1(v), v2(v) {} __device__ Float2(int v) : v1(v), v2(v) {} __device__ Float2 &operator+=(const Float2 &a) { v1 += a.v1; v2 += a.v2; return *this; } }; struct SumOp { __device__ SumOp(const float *t, int c, int s) : tensor(t), C(c), S(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { return tensor[(batch * C + plane) * S + n]; } const float *tensor; const int C; const int S; }; struct VarOp { __device__ VarOp(float m, const float *t, int c, int s) : mean(m), tensor(t), C(c), S(s) {} __device__ __forceinline__ float operator()(int batch, int plane, int n) { float val = tensor[(batch * C + plane) * S + n]; return (val - mean) * (val - mean); } const float mean; const float *tensor; const int C; const int S; }; struct GradOp { __device__ GradOp(float _gamma, float _beta, const float *_z, const float *_dz, int c, int s) : gamma(_gamma), beta(_beta), z(_z), dz(_dz), C(c), S(s) {} __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; float _dz = dz[(batch * C + plane) * S + n]; return Float2(_dz, _y * _dz); } const float gamma; const float beta; const float *z; const float *dz; const int C; const int S; }; static __device__ __forceinline__ float warpSum(float val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } #else __shared__ float values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } static __device__ __forceinline__ Float2 warpSum(Float2 value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } template <typename T, typename Op> __device__ T reduce(Op op, int plane, int N, int C, int S) { T sum = (T)0; for (int batch = 0; batch < N; ++batch) { for (int x = threadIdx.x; x < S; x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } /* * Kernels */ __global__ void mean_var_kernel(const float *x, float *mean, float *var, int N, int C, int S) { int plane = blockIdx.x; float norm = 1.f / (N * S); float _mean = reduce<float, SumOp>(SumOp(x, C, S), plane, N, C, S) * norm; __syncthreads(); float _var = reduce<float, VarOp>(VarOp(_mean, x, C, S), plane, N, C, S) * norm; if (threadIdx.x == 0) { mean[plane] = _mean; var[plane] = _var; } } __global__ void forward_kernel(const float *x, const float *mean, const float *var, const float *weight, const float *bias, float *y, float *z, float eps, int N, int 
C, int S) { int plane = blockIdx.x; float _mean = mean[plane]; float _var = var[plane]; float invStd = 0; if (_var != 0.f || eps != 0.f) { invStd = 1 / sqrt(_var + eps); } float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; for (int batch = 0; batch < N; ++batch) { for (int n = threadIdx.x; n < S; n += blockDim.x) { float _x = x[(batch * C + plane) * S + n]; float _y = (_x - _mean) * invStd; float _z = _y * gamma + beta; y[(batch * C + plane) * S + n] = _y; z[(batch * C + plane) * S + n] = _z; } } } __global__ void edz_eydz_kernel(const float *z, const float *dz, const float *weight, const float *bias, float *edz, float *eydz, float eps, int N, int C, int S) { int plane = blockIdx.x; float norm = 1.f / (N * S); float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; Float2 res = reduce<Float2, GradOp>(GradOp(gamma, beta, z, dz, C, S), plane, N, C, S); float _edz = res.v1 * norm; float _eydz = res.v2 * norm; __syncthreads(); if (threadIdx.x == 0) { edz[plane] = _edz; eydz[plane] = _eydz; } } __global__ void backward_kernel(const float *dz, const float *z, const float *var, const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, int N, int C, int S) { int plane = blockIdx.x; float _edz = edz[plane]; float _eydz = eydz[plane]; float gamma = weight != 0 ? weight[plane] : 1.f; float beta = bias != 0 ? bias[plane] : 0.f; if (dx != 0) { float _var = var[plane]; float invStd = 0; if (_var != 0.f || eps != 0.f) { invStd = 1 / sqrt(_var + eps); } float mul = gamma * invStd; for (int batch = 0; batch < N; ++batch) { for (int n = threadIdx.x; n < S; n += blockDim.x) { float _dz = dz[(batch * C + plane) * S + n]; float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; dx[(batch * C + plane) * S + n] = (_dz - _edz - _y * _eydz) * mul; } } } if (dweight != 0 || dbias != 0) { float norm = N * S; if (dweight != 0) { if (threadIdx.x == 0) { dweight[plane] += _eydz * norm; } } if (dbias != 0) { if (threadIdx.x == 0) { dbias[plane] += _edz * norm; } } } } /* * Implementations */ extern "C" int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, float *var, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); mean_var_kernel<<<blocks, threads, 0, stream>>>(x, mean, var, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _bn_forward_cuda(int N, int C, int S, const float *x, const float *mean, const float *var, const float *weight, const float *bias, float *y, float *z, float eps, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); forward_kernel<<<blocks, threads, 0, stream>>>(x, mean, var, weight, bias, y, z, eps, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _bn_edz_eydz_cuda(int N, int C, int S, const float *z, const float *dz, const float *weight, const float *bias, float *edz, float *eydz, float eps, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); edz_eydz_kernel<<<blocks, threads, 0, stream>>>(z, dz, weight, bias, edz, eydz, eps, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _bn_backward_cuda(int N, int C, int S, const float *dz, const float *z, const float *var, const float *weight, 
const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, float eps, cudaStream_t stream) { // Run kernel dim3 blocks(C); dim3 threads(getNumThreads(S)); backward_kernel<<<blocks, threads, 0, stream>>>(dz, z, var, weight, bias, edz, eydz, dx, dweight, dbias, eps, N, C, S); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _leaky_relu_cuda(int N, float *x, float slope, cudaStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, [slope] __device__ (const float& x) { return x * slope; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _leaky_relu_backward_cuda(int N, const float *x, float *dx, float slope, cudaStream_t stream) { // Run using thrust thrust::device_ptr<const float> th_x = thrust::device_pointer_cast(x); thrust::device_ptr<float> th_dx = thrust::device_pointer_cast(dx); thrust::transform_if(thrust::cuda::par.on(stream), th_dx, th_dx + N, th_x, th_dx, [slope] __device__ (const float& dx) { return dx * slope; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _elu_cuda(int N, float *x, cudaStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, [] __device__ (const float& x) { return exp(x) - 1.f; }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _elu_backward_cuda(int N, const float *x, float *dx, cudaStream_t stream) { // Run using thrust thrust::device_ptr<const float> th_x = thrust::device_pointer_cast(x); thrust::device_ptr<float> th_dx = thrust::device_pointer_cast(dx); thrust::transform_if(thrust::cuda::par.on(stream), th_dx, th_dx + N, th_x, th_x, th_dx, [] __device__ (const float& dx, const float& x) { return dx * (x + 1.f); }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; } extern "C" int _elu_inv_cuda(int N, float *x, cudaStream_t stream) { // Run using thrust thrust::device_ptr<float> th_x = thrust::device_pointer_cast(x); thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, [] __device__ (const float& x) { return log1p(x); }, [] __device__ (const float& x) { return x < 0; }); // Check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) return 0; else return 1; }
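The exported launchers return 1 on success and 0 when a kernel launch fails; the sketch below shows one hedged way a host caller might chain the mean/variance reduction and the forward pass. The function name, buffer names, and epsilon are assumptions for illustration only.

// Sketch: driving a forward batch-norm pass with the launchers defined above.
// Device buffers (d_x, d_mean, d_var, d_weight, d_bias, d_y, d_z) are assumed
// to be allocated by the caller; eps is an illustrative value.
int bn_forward_example(int N, int C, int S,
                       const float *d_x, const float *d_weight, const float *d_bias,
                       float *d_mean, float *d_var, float *d_y, float *d_z,
                       cudaStream_t stream) {
    const float eps = 1e-5f;
    // First reduce the per-plane mean and variance, then normalize and scale/shift.
    if (!_bn_mean_var_cuda(N, C, S, d_x, d_mean, d_var, stream))
        return 0;   // a kernel launch error was reported
    return _bn_forward_cuda(N, C, S, d_x, d_mean, d_var,
                            d_weight, d_bias, d_y, d_z, eps, stream);
}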
5ce58ee4f3685e09532ef8d63f61a5fe3802184c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
__global__ void fTanh( const float* arguments, float* results, const long size )
{
    const int X = gridDim.x;
    const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;

    if(index < size)
    {
        results[index] = tanh(arguments[index]);
    }
}
5ce58ee4f3685e09532ef8d63f61a5fe3802184c.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" __global__ void fTanh( const float* arguments, float* results, const long size ) { const int X = gridDim.x; const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x; if(index < size) { results[index] = tanh(arguments[index]); } }
72ed688b54f4b049cd0b138a326b853c7be0a328.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

#define WARP_BITS 5

__global__ void spmv_jds_naive(float *dst_vector, const float *d_data, const int *d_index, const int *d_perm,
                               const float *x_vec, const int *d_nzcnt, const int dim)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < dim)
    {
        float sum = 0.0f;

        // 32 is warp size
        int bound = sh_zcnt_int[ix / 32];

        for (int k = 0; k < bound; k++)
        {
            int j = jds_ptr_int[k] + ix;
            int in = d_index[j];

            float d = d_data[j];
            float t = x_vec[in];

            sum += d * t;
        }
        dst_vector[d_perm[ix]] = sum;
        // dst_vector[ix] = ix;
    }
}
72ed688b54f4b049cd0b138a326b853c7be0a328.cu
#define WARP_BITS 5

__global__ void spmv_jds_naive(float *dst_vector, const float *d_data, const int *d_index, const int *d_perm,
                               const float *x_vec, const int *d_nzcnt, const int dim)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix < dim)
    {
        float sum = 0.0f;

        // 32 is warp size
        int bound = sh_zcnt_int[ix / 32];

        for (int k = 0; k < bound; k++)
        {
            int j = jds_ptr_int[k] + ix;
            int in = d_index[j];

            float d = d_data[j];
            float t = x_vec[in];

            sum += d * t;
        }
        dst_vector[d_perm[ix]] = sum;
        // dst_vector[ix] = ix;
    }
}
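Both copies of spmv_jds_naive read jds_ptr_int and sh_zcnt_int without declaring them, so the file only compiles when those symbols are defined elsewhere in the project; this looks like the Parboil SpMV JDS kernel, where they are __constant__ arrays. A hedged sketch of the assumed supporting declarations follows; the array sizes are illustrative, not taken from the source.

// Assumed supporting declarations (sizes illustrative): the kernel indexes
// jds_ptr_int by jagged-diagonal iteration and sh_zcnt_int by warp id.
__constant__ int jds_ptr_int[5000];   // start offset of each jagged diagonal
__constant__ int sh_zcnt_int[5000];   // per-warp bound on non-zeros per row

// Host side, the arrays would be filled before the launch, e.g.:
//   cudaMemcpyToSymbol(jds_ptr_int, h_jds_ptr, depth * sizeof(int));
//   cudaMemcpyToSymbol(sh_zcnt_int, h_zcnt,   warps * sizeof(int));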
ec27122c8f870d0b84f938b97f30ac235ee8fb2f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file    source/nbfmm/model/double_disk.cu
/// @brief   The implementation of double disk shape generator.
///
/// @author  Mu Yang <[email protected]>
///

#include <nbfmm/model.hpp>
#include <cmath>
#include <hiprand/hiprand_kernel.h>
#include <nbfmm/core/kernel_function.hpp>
#include <nbfmm/utility.hpp>

/// @addtogroup impl_model
/// @{

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Generate double disk shape particles
///
/// @param[in]   num_particle       the number of particles.
/// @param[in]   offset             the offset of previous particle positions.
/// @param[out]  position_previous  the previous particle positions.
///
__global__ void generateDoubleDiskDevice(
    const int num_particle,
    float2    offset,
    float2*   position_previous
) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if ( idx >= num_particle ) {
    return;
  }
  position_previous[idx] += offset;
}

/// @}

// Generate double disk shape particles
void nbfmm::model::generateDoubleDisk(
    const int    num_particle1,
    const int    num_particle2,
    const float2 center_position1,
    const float2 center_position2,
    const float  radius1,
    const float  radius2,
    const float  weight,
    const float  eccentricity,
    const float  tick,
    float2*      gpuptr_position_current,
    float2*      gpuptr_position_previous,
    float*       gpuptr_weight_current
) {
  assert(eccentricity >= 0);

  generateDisk(num_particle1, center_position1, radius1, weight, tick,
               gpuptr_position_current, gpuptr_position_previous, gpuptr_weight_current);
  generateDisk(num_particle2, center_position2, radius2, weight, tick,
               gpuptr_position_current+num_particle1, gpuptr_position_previous+num_particle1,
               gpuptr_weight_current+num_particle1);

  const float2 effect1 = kernelFunction(center_position1, center_position2, weight * num_particle2);
  const float2 effect2 = kernelFunction(center_position2, center_position1, weight * num_particle1);

  float2 distance = center_position1 - center_position2;
  float r  = sqrt(distance.x * distance.x + distance.y * distance.y);
  float a1 = sqrt(effect1.x * effect1.x + effect1.y * effect1.y);
  float a2 = sqrt(effect2.x * effect2.x + effect2.y * effect2.y);
  float r1 = r * num_particle2 / (num_particle1 + num_particle2);
  float r2 = r * num_particle1 / (num_particle1 + num_particle2);

  float2 offset1;
  offset1.x = -effect1.y;
  offset1.y = effect1.x;
  offset1 *= sqrt(r1/a1) * tick / exp2(eccentricity);
  offset1 -= effect1 * tick * tick * eccentricity;

  float2 offset2;
  offset2.x = -effect2.y;
  offset2.y = effect2.x;
  offset2 *= sqrt(r2/a2) * tick / exp2(eccentricity);
  offset2 -= effect2 * tick * tick * eccentricity;

  hipLaunchKernelGGL(( generateDoubleDiskDevice), dim3(kMaxBlockDim), dim3(((num_particle1-1)/kMaxBlockDim)+1), 0, 0,
      num_particle1, offset1, gpuptr_position_previous
  );
  hipLaunchKernelGGL(( generateDoubleDiskDevice), dim3(kMaxBlockDim), dim3(((num_particle2-1)/kMaxBlockDim)+1), 0, 0,
      num_particle2, offset2, gpuptr_position_previous+num_particle1
  );
}
ec27122c8f870d0b84f938b97f30ac235ee8fb2f.cu
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file    source/nbfmm/model/double_disk.cu
/// @brief   The implementation of double disk shape generator.
///
/// @author  Mu Yang <[email protected]>
///

#include <nbfmm/model.hpp>
#include <cmath>
#include <curand_kernel.h>
#include <nbfmm/core/kernel_function.hpp>
#include <nbfmm/utility.hpp>

/// @addtogroup impl_model
/// @{

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Generate double disk shape particles
///
/// @param[in]   num_particle       the number of particles.
/// @param[in]   offset             the offset of previous particle positions.
/// @param[out]  position_previous  the previous particle positions.
///
__global__ void generateDoubleDiskDevice(
    const int num_particle,
    float2    offset,
    float2*   position_previous
) {
  const int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if ( idx >= num_particle ) {
    return;
  }
  position_previous[idx] += offset;
}

/// @}

// Generate double disk shape particles
void nbfmm::model::generateDoubleDisk(
    const int    num_particle1,
    const int    num_particle2,
    const float2 center_position1,
    const float2 center_position2,
    const float  radius1,
    const float  radius2,
    const float  weight,
    const float  eccentricity,
    const float  tick,
    float2*      gpuptr_position_current,
    float2*      gpuptr_position_previous,
    float*       gpuptr_weight_current
) {
  assert(eccentricity >= 0);

  generateDisk(num_particle1, center_position1, radius1, weight, tick,
               gpuptr_position_current, gpuptr_position_previous, gpuptr_weight_current);
  generateDisk(num_particle2, center_position2, radius2, weight, tick,
               gpuptr_position_current+num_particle1, gpuptr_position_previous+num_particle1,
               gpuptr_weight_current+num_particle1);

  const float2 effect1 = kernelFunction(center_position1, center_position2, weight * num_particle2);
  const float2 effect2 = kernelFunction(center_position2, center_position1, weight * num_particle1);

  float2 distance = center_position1 - center_position2;
  float r  = sqrt(distance.x * distance.x + distance.y * distance.y);
  float a1 = sqrt(effect1.x * effect1.x + effect1.y * effect1.y);
  float a2 = sqrt(effect2.x * effect2.x + effect2.y * effect2.y);
  float r1 = r * num_particle2 / (num_particle1 + num_particle2);
  float r2 = r * num_particle1 / (num_particle1 + num_particle2);

  float2 offset1;
  offset1.x = -effect1.y;
  offset1.y = effect1.x;
  offset1 *= sqrt(r1/a1) * tick / exp2(eccentricity);
  offset1 -= effect1 * tick * tick * eccentricity;

  float2 offset2;
  offset2.x = -effect2.y;
  offset2.y = effect2.x;
  offset2 *= sqrt(r2/a2) * tick / exp2(eccentricity);
  offset2 -= effect2 * tick * tick * eccentricity;

  generateDoubleDiskDevice<<<kMaxBlockDim, ((num_particle1-1)/kMaxBlockDim)+1>>>(
      num_particle1, offset1, gpuptr_position_previous
  );
  generateDoubleDiskDevice<<<kMaxBlockDim, ((num_particle2-1)/kMaxBlockDim)+1>>>(
      num_particle2, offset2, gpuptr_position_previous+num_particle1
  );
}
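generateDoubleDisk leans on float2 arithmetic (-, *, +=, -=, *=) that plain CUDA/HIP does not define; those operators presumably come from nbfmm/utility.hpp. A minimal sketch of the overloads the code needs, written here only to make the dependency explicit:

// Sketch of the float2 operators generateDoubleDisk depends on (assumed to be
// provided by nbfmm/utility.hpp in the real project).
__host__ __device__ inline float2 operator-(const float2 a, const float2 b) {
    return make_float2(a.x - b.x, a.y - b.y);
}
__host__ __device__ inline float2 operator*(const float2 a, const float s) {
    return make_float2(a.x * s, a.y * s);
}
__host__ __device__ inline float2& operator+=(float2& a, const float2 b) {
    a.x += b.x; a.y += b.y; return a;
}
__host__ __device__ inline float2& operator-=(float2& a, const float2 b) {
    a.x -= b.x; a.y -= b.y; return a;
}
__host__ __device__ inline float2& operator*=(float2& a, const float s) {
    a.x *= s; a.y *= s; return a;
}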
031dba94d3803b2652bf3c022728974e22288d39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*$Id: imageFilter.cu 2016-03-04 18:27:54 (author: Reza Mokhtari)$*/ #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <ctype.h> #include <fcntl.h> #include <unistd.h> #include <sys/mman.h> #include <time.h> #include <sys/time.h> #include "imageFilter_kernel.hip" #define IMG_DATA_OFFSET_POS 10 #define BITS_PER_PIXEL_POS 28 int swap; void test_endianess(); void swap_bytes(char *bytes, int num_bytes); int main(int argc, char *argv[]) { int i; int fd; char *fdata; struct stat finfo; char * inputfname; char * outputfname; if (argc < 4) { printf("USAGE: %s <bitmap input filename> <bitmap output file name> <part specifier>\n", argv[0]); exit(1); } inputfname = argv[1]; outputfname = argv[2]; char partId = argv[3][0]; if(partId != 'a' && partId != 'b' && partId != 'c') { printf("Please provide a part specifier: a, b, or c\n"); exit(1); } printf("Image filter: Running...\n"); fd = open(inputfname, O_RDONLY); fstat(fd, &finfo); fdata = (char*) malloc(finfo.st_size); read (fd, fdata, finfo.st_size); if ((fdata[0] != 'B') || (fdata[1] != 'M')) { printf("File is not a valid bitmap file. Terminating the program\n"); exit(1); } test_endianess(); // will set the variable "swap" unsigned short *bitsperpixel = (unsigned short *)(&(fdata[BITS_PER_PIXEL_POS])); if (swap) { printf("swapping\n"); swap_bytes((char *)(bitsperpixel), sizeof(*bitsperpixel)); } // ensure its 3 bytes per pixel if (*bitsperpixel != 24) { printf("Error: Invalid bitmap format - "); printf("This application only accepts 24-bit pictures. Exiting\n"); exit(1); } unsigned short *data_pos = (unsigned short *)(&(fdata[IMG_DATA_OFFSET_POS])); if (swap) { swap_bytes((char *)(data_pos), sizeof(*data_pos)); } int imgdata_bytes = (int)finfo.st_size - (int)(*(data_pos)); printf("This file has %d bytes of image data, %d pixels\n", imgdata_bytes, imgdata_bytes / 3); int width = *((int*)&fdata[18]); printf("Width: %d\n", width); int height = *((int*)&fdata[22]); printf("Height: %d\n", height); int fileSize = (int) finfo.st_size; //p will point to the first pixel char* p = &(fdata[*data_pos]); //Set the number of blocks and threads dim3 grid(12, 1, 1); dim3 block(1024, 1, 1); char* d_inputPixels; hipMalloc((void**) &d_inputPixels, width * height * 3); hipMemcpy(d_inputPixels, p, width * height * 3, hipMemcpyHostToDevice); char* d_outputPixels; hipMalloc((void**) &d_outputPixels, width * height * 3); hipMemset(d_outputPixels, 0, width * height * 3); struct timeval start_tv, end_tv; time_t sec; time_t ms; time_t diff; gettimeofday(&start_tv, NULL); int numPixels_per_thread = 0; int numBlocks_row = 0; int numBlcoks_col = 0; int total_thread = grid.x * block.x; int total_pixels = width * height; if(total_pixels % total_thread == 0) numPixels_per_thread = total_pixels / total_thread; else numPixels_per_thread = total_pixels / total_thread + 1; if((width - 8) % 120 == 0) numBlcoks_col = (width - 8) / 120; else numBlcoks_col = (width - 8) / 120 + 1; if((height - 8) % 120 == 0) numBlocks_row = (height - 8) / 120; else numBlocks_row = (height - 8) / 120 + 1; int total_block = numBlocks_row * numBlcoks_col; int num_loops = (total_block % 12 == 0)? 
total_block / 12 : total_block / 12 + 1; if(partId == 'a') { hipLaunchKernelGGL(( imageFilterKernelPartA), dim3(grid), dim3(block), 0, 0, (char3*) d_inputPixels, (char3*) d_outputPixels, width, height , numPixels_per_thread); } else if(partId == 'b') { hipLaunchKernelGGL(( imageFilterKernelPartB), dim3(grid), dim3(block), 0, 0, (char3*) d_inputPixels, (char3*) d_outputPixels, width, height , numPixels_per_thread, total_thread); } else if(partId == 'c') { hipLaunchKernelGGL(( imageFilterKernelPartC), dim3(grid), dim3(block), 0, 0, (char3*) d_inputPixels, (char3*) d_outputPixels, width, height, numBlocks_row, numBlcoks_col, num_loops); } hipDeviceSynchronize(); gettimeofday(&end_tv, NULL); sec = end_tv.tv_sec - start_tv.tv_sec; ms = end_tv.tv_usec - start_tv.tv_usec; diff = sec * 1000000 + ms; printf("%10s:\t\t%fms\n", "Time elapsed", (double)((double)diff/1000.0)); char* outputPixels = (char*) malloc(height * width * 3); hipMemcpy(outputPixels, d_outputPixels, height * width * 3, hipMemcpyDeviceToHost); memcpy(&(fdata[*data_pos]), outputPixels, height * width * 3); FILE *writeFile; writeFile = fopen(outputfname,"w+"); for(i = 0; i < fileSize; i++) fprintf(writeFile,"%c", fdata[i]); fclose(writeFile); return 0; } void test_endianess() { unsigned int num = 0x12345678; char *low = (char *)(&(num)); if (*low == 0x78) { //dprintf("No need to swap\n"); swap = 0; } else if (*low == 0x12) { //dprintf("Need to swap\n"); swap = 1; } else { printf("Error: Invalid value found in memory\n"); exit(1); } } void swap_bytes(char *bytes, int num_bytes) { int i; char tmp; for (i = 0; i < num_bytes/2; i++) { //dprintf("Swapping %d and %d\n", bytes[i], bytes[num_bytes - i - 1]); tmp = bytes[i]; bytes[i] = bytes[num_bytes - i - 1]; bytes[num_bytes - i - 1] = tmp; } }
031dba94d3803b2652bf3c022728974e22288d39.cu
/*$Id: imageFilter.cu 2016-03-04 18:27:54 (author: Reza Mokhtari)$*/ #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <ctype.h> #include <fcntl.h> #include <unistd.h> #include <sys/mman.h> #include <time.h> #include <sys/time.h> #include "imageFilter_kernel.cu" #define IMG_DATA_OFFSET_POS 10 #define BITS_PER_PIXEL_POS 28 int swap; void test_endianess(); void swap_bytes(char *bytes, int num_bytes); int main(int argc, char *argv[]) { int i; int fd; char *fdata; struct stat finfo; char * inputfname; char * outputfname; if (argc < 4) { printf("USAGE: %s <bitmap input filename> <bitmap output file name> <part specifier>\n", argv[0]); exit(1); } inputfname = argv[1]; outputfname = argv[2]; char partId = argv[3][0]; if(partId != 'a' && partId != 'b' && partId != 'c') { printf("Please provide a part specifier: a, b, or c\n"); exit(1); } printf("Image filter: Running...\n"); fd = open(inputfname, O_RDONLY); fstat(fd, &finfo); fdata = (char*) malloc(finfo.st_size); read (fd, fdata, finfo.st_size); if ((fdata[0] != 'B') || (fdata[1] != 'M')) { printf("File is not a valid bitmap file. Terminating the program\n"); exit(1); } test_endianess(); // will set the variable "swap" unsigned short *bitsperpixel = (unsigned short *)(&(fdata[BITS_PER_PIXEL_POS])); if (swap) { printf("swapping\n"); swap_bytes((char *)(bitsperpixel), sizeof(*bitsperpixel)); } // ensure its 3 bytes per pixel if (*bitsperpixel != 24) { printf("Error: Invalid bitmap format - "); printf("This application only accepts 24-bit pictures. Exiting\n"); exit(1); } unsigned short *data_pos = (unsigned short *)(&(fdata[IMG_DATA_OFFSET_POS])); if (swap) { swap_bytes((char *)(data_pos), sizeof(*data_pos)); } int imgdata_bytes = (int)finfo.st_size - (int)(*(data_pos)); printf("This file has %d bytes of image data, %d pixels\n", imgdata_bytes, imgdata_bytes / 3); int width = *((int*)&fdata[18]); printf("Width: %d\n", width); int height = *((int*)&fdata[22]); printf("Height: %d\n", height); int fileSize = (int) finfo.st_size; //p will point to the first pixel char* p = &(fdata[*data_pos]); //Set the number of blocks and threads dim3 grid(12, 1, 1); dim3 block(1024, 1, 1); char* d_inputPixels; cudaMalloc((void**) &d_inputPixels, width * height * 3); cudaMemcpy(d_inputPixels, p, width * height * 3, cudaMemcpyHostToDevice); char* d_outputPixels; cudaMalloc((void**) &d_outputPixels, width * height * 3); cudaMemset(d_outputPixels, 0, width * height * 3); struct timeval start_tv, end_tv; time_t sec; time_t ms; time_t diff; gettimeofday(&start_tv, NULL); int numPixels_per_thread = 0; int numBlocks_row = 0; int numBlcoks_col = 0; int total_thread = grid.x * block.x; int total_pixels = width * height; if(total_pixels % total_thread == 0) numPixels_per_thread = total_pixels / total_thread; else numPixels_per_thread = total_pixels / total_thread + 1; if((width - 8) % 120 == 0) numBlcoks_col = (width - 8) / 120; else numBlcoks_col = (width - 8) / 120 + 1; if((height - 8) % 120 == 0) numBlocks_row = (height - 8) / 120; else numBlocks_row = (height - 8) / 120 + 1; int total_block = numBlocks_row * numBlcoks_col; int num_loops = (total_block % 12 == 0)? 
        total_block / 12 : total_block / 12 + 1;

    if(partId == 'a')
    {
        imageFilterKernelPartA<<<grid, block>>>((char3*) d_inputPixels,
            (char3*) d_outputPixels, width, height, numPixels_per_thread);
    }
    else if(partId == 'b')
    {
        imageFilterKernelPartB<<<grid, block>>>((char3*) d_inputPixels,
            (char3*) d_outputPixels, width, height, numPixels_per_thread, total_thread);
    }
    else if(partId == 'c')
    {
        imageFilterKernelPartC<<<grid, block>>>((char3*) d_inputPixels,
            (char3*) d_outputPixels, width, height, numBlocks_row, numBlcoks_col, num_loops);
    }

    cudaThreadSynchronize();

    gettimeofday(&end_tv, NULL);
    sec = end_tv.tv_sec - start_tv.tv_sec;
    ms = end_tv.tv_usec - start_tv.tv_usec;
    diff = sec * 1000000 + ms;
    printf("%10s:\t\t%fms\n", "Time elapsed", (double)((double)diff/1000.0));

    char* outputPixels = (char*) malloc(height * width * 3);
    cudaMemcpy(outputPixels, d_outputPixels, height * width * 3, cudaMemcpyDeviceToHost);

    memcpy(&(fdata[*data_pos]), outputPixels, height * width * 3);

    FILE *writeFile;
    writeFile = fopen(outputfname,"w+");
    for(i = 0; i < fileSize; i++)
        fprintf(writeFile,"%c", fdata[i]);
    fclose(writeFile);

    return 0;
}

void test_endianess()
{
    unsigned int num = 0x12345678;
    char *low = (char *)(&(num));
    if (*low == 0x78)
    {
        //dprintf("No need to swap\n");
        swap = 0;
    }
    else if (*low == 0x12)
    {
        //dprintf("Need to swap\n");
        swap = 1;
    }
    else
    {
        printf("Error: Invalid value found in memory\n");
        exit(1);
    }
}

void swap_bytes(char *bytes, int num_bytes)
{
    int i;
    char tmp;
    for (i = 0; i < num_bytes/2; i++)
    {
        //dprintf("Swapping %d and %d\n", bytes[i], bytes[num_bytes - i - 1]);
        tmp = bytes[i];
        bytes[i] = bytes[num_bytes - i - 1];
        bytes[num_bytes - i - 1] = tmp;
    }
}
b0bad2138ea712759596eb05d53c1915836d37d5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> // Performs one step of the hillis and steele algorithm for integers __global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE) { // setting ID int myId = threadIdx.x + blockDim.x * blockIdx.x; // checking if out-of-bounds if(myId >= ARRAY_SIZE) { return; } // setting itself int myVal = d_in[myId]; // finding the number to add, checking out-of-bounds int myAdd; if((myId - step)<0) { myAdd = 0; } else { myAdd = d_in[myId-step]; } // setting output d_out[myId] = myVal + myAdd; } // Performs one step of the hillis and steele algorithm for integers __global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE) { // sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; // setting ID int myId = threadIdx.x + blockDim.x * blockIdx.x; // checking if out-of-bounds if(myId >= ARRAY_SIZE) { return; } // setting itself int myVal = d_in[myId]; // finding the number to add, checking out-of-bounds int myAdd; if((myId - step)<0) { myAdd = 0; } else { myAdd = d_in[myId-step]; } // setting output d_out[myId] = myVal + myAdd; } void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads) { // initializing starting variables unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1; int step = 1; // initializing and allocating an "intermediate" value so we don't have to change anything in d_in int * d_intermediate; hipMalloc((void **) &d_intermediate, ARRAY_BYTES); hipMemcpy(d_intermediate, d_in, ARRAY_BYTES, hipMemcpyDeviceToDevice); int i = 1; while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE)) { // for debugging purposes // printf("round %d: step %d\n", i, step); // i++; // one step/kernel at a time to do synchronization across blocks hipLaunchKernelGGL(( hs_kernel_global), dim3(num_blocks), dim3(num_threads), 0, 0, d_out, d_intermediate, step, ARRAY_SIZE); hipMemcpy(d_intermediate, d_out, ARRAY_BYTES, hipMemcpyDeviceToDevice); step <<= 1; // double step size at each iteration } hipFree(d_intermediate); } int main(int argc, char **argv) { printf("Hillis and Steele ONLINE... \n"); // defining vars const unsigned int num_threads = 512; const unsigned int ARRAY_SIZE = 1<<21; const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); printf("defined vars... \n"); printf("ARRAY_SIZE: %d\n", ARRAY_SIZE); // setting host in int h_in[ARRAY_SIZE]; int h_out[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = 3; h_out[i] = 0; } printf("filled array... \n"); // setting device pointers int * d_in; int * d_out; printf("defined device pointers... \n"); // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_out, ARRAY_BYTES); printf("malloc device pointers... \n"); // transfer arrays to GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); printf("copy device pointers... \n"); // setting up time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // kernel time!!! 
    hipEventRecord(start, 0);
    for (int i = 0; i < 100; i++)
    {
        hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    // calculating time
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    elapsedTime /= 100.0f;

    // back to host
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    // printing
    for(int i = 400; i<408; i++)
    {
        printf("index %d: count %d\n", i, h_out[i]);
    }
    printf("average time elapsed: %f\n", elapsedTime);

    // free GPU memory allocation
    hipFree(d_in);
    hipFree(d_out);

    return 0;
}
b0bad2138ea712759596eb05d53c1915836d37d5.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> // Performs one step of the hillis and steele algorithm for integers __global__ void hs_kernel_global(int * d_out, int * d_in, int step, const int ARRAY_SIZE) { // setting ID int myId = threadIdx.x + blockDim.x * blockIdx.x; // checking if out-of-bounds if(myId >= ARRAY_SIZE) { return; } // setting itself int myVal = d_in[myId]; // finding the number to add, checking out-of-bounds int myAdd; if((myId - step)<0) { myAdd = 0; } else { myAdd = d_in[myId-step]; } // setting output d_out[myId] = myVal + myAdd; } // Performs one step of the hillis and steele algorithm for integers __global__ void hs_kernel_shared(int * d_out, int * d_in, int step, const int ARRAY_SIZE) { // sdate is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; // setting ID int myId = threadIdx.x + blockDim.x * blockIdx.x; // checking if out-of-bounds if(myId >= ARRAY_SIZE) { return; } // setting itself int myVal = d_in[myId]; // finding the number to add, checking out-of-bounds int myAdd; if((myId - step)<0) { myAdd = 0; } else { myAdd = d_in[myId-step]; } // setting output d_out[myId] = myVal + myAdd; } void hs_kernel_wrapper(int * d_out, int * d_in, const unsigned int ARRAY_SIZE, const unsigned int ARRAY_BYTES, const unsigned int num_threads) { // initializing starting variables unsigned int num_blocks = ((ARRAY_SIZE) / num_threads) + 1; int step = 1; // initializing and allocating an "intermediate" value so we don't have to change anything in d_in int * d_intermediate; cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); cudaMemcpy(d_intermediate, d_in, ARRAY_BYTES, cudaMemcpyDeviceToDevice); int i = 1; while(step<ARRAY_SIZE) // stops when step is larger than array size, happens at O(log2(ARRAY_SIZE)) { // for debugging purposes // printf("round %d: step %d\n", i, step); // i++; // one step/kernel at a time to do synchronization across blocks hs_kernel_global<<<num_blocks, num_threads>>>(d_out, d_intermediate, step, ARRAY_SIZE); cudaMemcpy(d_intermediate, d_out, ARRAY_BYTES, cudaMemcpyDeviceToDevice); step <<= 1; // double step size at each iteration } cudaFree(d_intermediate); } int main(int argc, char **argv) { printf("Hillis and Steele ONLINE... \n"); // defining vars const unsigned int num_threads = 512; const unsigned int ARRAY_SIZE = 1<<21; const unsigned int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); printf("defined vars... \n"); printf("ARRAY_SIZE: %d\n", ARRAY_SIZE); // setting host in int h_in[ARRAY_SIZE]; int h_out[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = 3; h_out[i] = 0; } printf("filled array... \n"); // setting device pointers int * d_in; int * d_out; printf("defined device pointers... \n"); // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); printf("malloc device pointers... \n"); // transfer arrays to GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); printf("copy device pointers... \n"); // setting up time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // kernel time!!! 
    cudaEventRecord(start, 0);
    for (int i = 0; i < 100; i++)
    {
        hs_kernel_wrapper(d_out, d_in, ARRAY_SIZE, ARRAY_BYTES, num_threads);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    // calculating time
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    elapsedTime /= 100.0f;

    // back to host
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // printing
    for(int i = 400; i<408; i++)
    {
        printf("index %d: count %d\n", i, h_out[i]);
    }
    printf("average time elapsed: %f\n", elapsedTime);

    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
4d5db241dca28a4e09d7b67167942e4d48b0d7d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_4_a; int xdim0_update_halo_kernel3_minus_4_a_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_4_a; int ydim0_update_halo_kernel3_minus_4_a_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_4_a; int xdim1_update_halo_kernel3_minus_4_a_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_4_a; int ydim1_update_halo_kernel3_minus_4_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_minus_4_a*(y)+xdim0_update_halo_kernel3_minus_4_a*ydim0_update_halo_kernel3_minus_4_a*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_minus_4_a*(y)+xdim1_update_halo_kernel3_minus_4_a*ydim1_update_halo_kernel3_minus_4_a*(z)) //user function __device__ inline void update_halo_kernel3_minus_4_a_gpu(double *vol_flux_x, double *mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = -(vol_flux_x[OPS_ACC0(4,0,0)]); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = -(mass_flux_x[OPS_ACC1(4,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_4_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel3_minus_4_a + idx_z * 1*1 * xdim0_update_halo_kernel3_minus_4_a * ydim0_update_halo_kernel3_minus_4_a; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel3_minus_4_a + idx_z * 1*1 * xdim1_update_halo_kernel3_minus_4_a * ydim1_update_halo_kernel3_minus_4_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_4_a_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_4_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,64)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(64,"update_halo_kernel3_minus_4_a"); OPS_kernels[64].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = 
MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_4_a_h || ydim0 != ydim0_update_halo_kernel3_minus_4_a_h || xdim1 != xdim1_update_halo_kernel3_minus_4_a_h || ydim1 != ydim1_update_halo_kernel3_minus_4_a_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel3_minus_4_a, &xdim0, sizeof(int) ); xdim0_update_halo_kernel3_minus_4_a_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel3_minus_4_a, &ydim0, sizeof(int) ); ydim0_update_halo_kernel3_minus_4_a_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel3_minus_4_a, &xdim1, sizeof(int) ); xdim1_update_halo_kernel3_minus_4_a_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel3_minus_4_a, &ydim1, sizeof(int) ); ydim1_update_halo_kernel3_minus_4_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[64].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_4_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[64].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[64].mpi_time += t2-t1; OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor 
*)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 64; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 64; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_4_a_execute; if (OPS_diags > 1) { ops_timing_realloc(64,"update_halo_kernel3_minus_4_a"); } ops_enqueue_kernel(desc); } #endif
4d5db241dca28a4e09d7b67167942e4d48b0d7d8.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_4_a; int xdim0_update_halo_kernel3_minus_4_a_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_4_a; int ydim0_update_halo_kernel3_minus_4_a_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_4_a; int xdim1_update_halo_kernel3_minus_4_a_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_4_a; int ydim1_update_halo_kernel3_minus_4_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_minus_4_a*(y)+xdim0_update_halo_kernel3_minus_4_a*ydim0_update_halo_kernel3_minus_4_a*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_minus_4_a*(y)+xdim1_update_halo_kernel3_minus_4_a*ydim1_update_halo_kernel3_minus_4_a*(z)) //user function __device__ inline void update_halo_kernel3_minus_4_a_gpu(double *vol_flux_x, double *mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = -(vol_flux_x[OPS_ACC0(4,0,0)]); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = -(mass_flux_x[OPS_ACC1(4,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_4_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel3_minus_4_a + idx_z * 1*1 * xdim0_update_halo_kernel3_minus_4_a * ydim0_update_halo_kernel3_minus_4_a; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel3_minus_4_a + idx_z * 1*1 * xdim1_update_halo_kernel3_minus_4_a * ydim1_update_halo_kernel3_minus_4_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_4_a_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_4_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,64)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(64,"update_halo_kernel3_minus_4_a"); OPS_kernels[64].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int 
ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_4_a_h || ydim0 != ydim0_update_halo_kernel3_minus_4_a_h || xdim1 != xdim1_update_halo_kernel3_minus_4_a_h || ydim1 != ydim1_update_halo_kernel3_minus_4_a_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel3_minus_4_a, &xdim0, sizeof(int) ); xdim0_update_halo_kernel3_minus_4_a_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel3_minus_4_a, &ydim0, sizeof(int) ); ydim0_update_halo_kernel3_minus_4_a_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel3_minus_4_a, &xdim1, sizeof(int) ); xdim1_update_halo_kernel3_minus_4_a_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel3_minus_4_a, &ydim1, sizeof(int) ); ydim1_update_halo_kernel3_minus_4_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[64].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel3_minus_4_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[64].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[64].mpi_time += t2-t1; OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 64; desc->hash = 5381; 
desc->hash = ((desc->hash << 5) + desc->hash) + 64; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_4_a_execute; if (OPS_diags > 1) { ops_timing_realloc(64,"update_halo_kernel3_minus_4_a"); } ops_enqueue_kernel(desc); } #endif
294f567cff73a46055857c391553bef7f7b69ef7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <omp.h> #define RANDVAL 1984 #define BLOCK_SIZE 16 #define DIM 4 // Linear dimension of our grid - not counting ghost cells // Create an array that stores the number of rows of the subGrid in each device __host__ void RowCount(int devCount,int *subGridSize){ for (int i = 0; i < devCount; i++){ if (DIM % devCount == 0) subGridSize[i] = DIM / devCount; else{ if (i == 0) // If it is not possible to divide the rows equally between the devices, the first GPU will receive more rows than the others subGridSize[i] = ((int) DIM / devCount) + DIM % devCount; else subGridSize[i] = (int) DIM / devCount; } } } // Return the number of rows that exist in the main grid before the first row of the actual subgrid __host__ int rowsBefore(int device, int *subGridSize){ if (device == 0) return 0; else if (device ==1) return subGridSize[0]; else return subGridSize[device-1] + rowsBefore(device-1, subGridSize); } __host__ int getLastRow(int device, int *subGridSize){ if (device == 0) return subGridSize[0]; else return subGridSize[device] + getLastRow(device-1, subGridSize); } __host__ void buildHaloCells(int *h_grid){ // Copy halo rows for (int j = 1; j<= DIM; j++){ // Copy first real row to last halo row h_grid[(DIM+1) * (DIM+2) + j] = h_grid[1 * (DIM+2) + j]; // Copy last real row to first halo row h_grid[j] = h_grid[DIM * (DIM+2) + j]; } // Copy halo columns for (int i = 0; i<= DIM+1; i++){ // Copy first real column to last halo column h_grid[i * (DIM+2) + (DIM+1)] = h_grid[i* (DIM+2) + 1]; // Copy last real column to first halo column h_grid[i * (DIM+2)] = h_grid[i * (DIM+2) + DIM]; } } __host__ void buildSubGrid(int *h_grid, int *h_subGrid, int firstRow, int lastRow, int d){ for(int i = firstRow-1; i<= lastRow+1; i++){ for(int j = 0; j<= DIM+1; j++){ h_subGrid[i * (DIM+2) + j] = h_grid[i * (DIM+2) + j]; } } } __global__ void ghostRows(int *grid){ // We want id [1,DIM] int id = blockDim.x * blockIdx.x + threadIdx.x + 1; if (id <= DIM){ //Copy first real row to bottom ghost row grid[(DIM+2)*(DIM+1)+id] = grid[(DIM+2)+id]; //Copy last real row to top ghost row grid[id] = grid[(DIM+2)*DIM + id]; } } __global__ void ghostCols(int *grid){ // We want id [0,DIM+1] int id = blockDim.x * blockIdx.x + threadIdx.x; if (id <= DIM+1){ //Copy first real column to right most ghost column grid[id*(DIM+2)+DIM+1] = grid[id*(DIM+2)+1]; //Copy last real column to left most ghost column grid[id*(DIM+2)] = grid[id*(DIM+2) + DIM]; } } __global__ void GOL(int *grid, int *newGrid, int firstRow){ // We want id [1,DIM] int iy = blockDim.y * blockIdx.y + threadIdx.y + firstRow; int ix = blockDim.x * blockIdx.x + threadIdx.x + 1; int id = iy * (DIM+2) + ix; int numNeighbors; if (iy <= DIM && ix <= DIM) { // Get the number of neighbors for a given grid point numNeighbors = grid[id+(DIM+2)] + grid[id-(DIM+2)] //upper lower + grid[id+1] + grid[id-1] //right left + grid[id+(DIM+3)] + grid[id-(DIM+3)] //diagonals + grid[id-(DIM+1)] + grid[id+(DIM+1)]; int cell = grid[id]; //printf("firstrow: %d ID: %d Grid[%d]: %d cell: %d Neighboors: %d \n", firstRow, id, id, grid[id], cell ,numNeighbors); // Here we have explicitly all of the game rules if (cell == 1 && numNeighbors < 2) newGrid[id] = 0; else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) newGrid[id] = 1; else if (cell == 1 && numNeighbors > 3) newGrid[id] = 0; else if (cell == 0 && numNeighbors == 3) newGrid[id] = 1; else newGrid[id] = cell; } 
} int main(int argc, char* argv[]){ int devCount; hipGetDeviceCount(&devCount); // Get the number of devices that the system have printf("There are %d devices \n", devCount); // If there is no GPU, it is not possible to run this version of Game of Life if (devCount == 0){ printf("There are no devices in this machine!"); return 0; // if there is no GPU, then break the code } int i, j, iter; int alive = 0, lim = DIM; int *h_grid; size_t gridBytes; gridBytes = sizeof(int)*(DIM+2)*(DIM+2); // 2 added for periodic boundary condition ghost cells // Alocate memory for host grid h_grid = (int*)malloc(gridBytes); srand(RANDVAL); // Assign random value to cells of the grid #pragma omp parallel for private(i,j) for(i = 1; i<=DIM; i++) { for(j = 1; j<=DIM; j++) { h_grid[i*(DIM+2)+j] = rand() % 2; } } // End of pragma printf("\nInitial Grid:\n"); for(int i = 1; i <= DIM; i++){ for(int j = 1; j <= DIM; j++){ printf("%d ", h_grid[i*(DIM+2)+j]); } printf("\n"); } dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE,1); int linGrid = (int)ceil(DIM/(float)BLOCK_SIZE); dim3 gridSize(linGrid,linGrid,1); dim3 cpyBlockSize(BLOCK_SIZE,1,1); dim3 cpyGridRowsGridSize((int)ceil(DIM/(float)cpyBlockSize.x),1,1); dim3 cpyGridColsGridSize((int)ceil((DIM+2)/(float)cpyBlockSize.x),1,1); if (devCount == 1){ int *d_grid, *d_newGrid, *d_tmpGrid; // Allocate device grids - if there is more than 1 thread, It'll allocate memory in each device hipMalloc(&d_grid, gridBytes); hipMalloc(&d_newGrid, gridBytes); // Copy over initial game grid (Dim-1 threads) hipMemcpy(d_grid, h_grid, gridBytes, hipMemcpyHostToDevice); for (iter = 0; iter < lim; iter ++){ hipLaunchKernelGGL(( ghostRows), dim3(cpyGridRowsGridSize), dim3(cpyBlockSize), 0, 0, d_grid); hipLaunchKernelGGL(( ghostCols), dim3(cpyGridColsGridSize), dim3(cpyBlockSize), 0, 0, d_grid); hipLaunchKernelGGL(( GOL), dim3(gridSize), dim3(blockSize), 0, 0, d_grid, d_newGrid,1); // Swap our grids and iterate again d_tmpGrid = d_grid; d_grid = d_newGrid; d_newGrid = d_tmpGrid; } // Copy back results and sum hipMemcpy(h_grid, d_grid, gridBytes, hipMemcpyDeviceToHost); // calculate the total of cells alive after the iteractions //#pragma omp parallel for private(i,j,alive) for (i = 1; i <= DIM; i++){ for ( j =1 ; j <= DIM; j++){ alive += h_grid[i*(DIM+2)+j]; } } // end of prama printf("There are %d cells alive after the last iteration\n", alive); // Release memory hipFree(d_grid); hipFree(d_newGrid); free(h_grid); return 1; } if (devCount > 1){ int device, firstRow, lastRow, rank, size, processFirstRow; int *h_subGrid, *h_SubGridSize, *h_tempGrid; int *d_subGrid, *d_tempSub; size_t subBytes; MPI_Status status; h_SubGridSize = (int*)malloc(sizeof(int)*devCount); // Allocate memory for the subGridSize, which stores the number of elements in each subGrids RowCount(devCount, h_SubGridSize); // Calculate the size of the subgrid in each GPU for (iter = 0; iter < lim; iter ++){ if (rank == 0){ h_tempGrid = (int*)malloc(gridBytes); buildHaloCells(h_grid); MPI_Init(); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); processFirstRow = rowsBefore(rank, DIM); for (i = 1; i< size; i++){ MPI_Send(&h_grid[processFirstRow], DIM*DIM/size, MPI_INT, i, i, MPI_COMM_WORLD); // Send a fragment of h_grid for each process MPI_Recv(&h_grid[processFirstRow], DIM*DIM/size, MPI_INT, i, 99, MPI_COMM_WORLD, &status); // Receive the fragment that contains the resulting cells after the iteration } } else{ MPI_Recv(&h_grid, DIM*DIM/size, MPI_INT, 0, 99, MPI_COMM_WORLD, &status); #pragma omp for 
private(device, h_subGrid, subBytes, firstRow, lastRow) for (device = 0; device < devCount; device++){ hipSetDevice(device); // Set device to be used subBytes = sizeof(int)* (DIM+2) * (DIM+2); //(h_SubGridSize[device]+2) * (DIM+2); number of rows + 2 halo/ghost rows + 2 halo/ghost columns h_subGrid = (int*)malloc(subBytes); //allocate memory for the subGrid // Allocate device grids - if there is more than 1 thread, It'll allocate memory in each device hipMalloc(&d_subGrid, subBytes); hipMalloc(&d_tempSub, subBytes); // Calculates the first row of the submatrix in the main matrix - Does not count the ghost rows firstRow = rowsBefore(device, h_SubGridSize) + 1; // Calculates the last row of the submatrix in the main matrix lastRow = getLastRow(device, h_SubGridSize); buildSubGrid(h_grid, h_subGrid, firstRow, lastRow, device); hipMemcpy(d_subGrid, h_subGrid, subBytes, hipMemcpyHostToDevice); // call GOL function and the new values will go to the d_tempSub grid hipLaunchKernelGGL(( GOL), dim3(gridSize), dim3(blockSize), 0, 0, d_subGrid, d_tempSub, firstRow); free(h_subGrid); h_subGrid = (int*)malloc(subBytes); //allocate memory for the subGrid hipMemcpy(h_subGrid, d_tempSub, subBytes, hipMemcpyDeviceToHost); for(int i = firstRow; i <= lastRow; i++){ for(int j = 1; j <= DIM; j++){ h_tempGrid[i*(DIM+2)+j] = h_subGrid[i*(DIM+2)+j]; } } } // End pragma for (i = 1; i <= DIM; i++){ for (j = 1; j <= DIM; j++){ h_grid[i * (DIM+2) + j] = h_tempGrid[i * (DIM+2) + j]; } } printf("\niteration: %d (with ghost cells)\n", iter+1); for(i = 0; i <= DIM+1; i++){ for(j = 0; j <= DIM+1; j++){ printf("%d ", h_grid[i*(DIM+2)+j]); } printf("\n"); } } // end for Device MPI_Send(&h_grid, DIM*DIM/size, MPI_INT, 0, i, MPI_COMM_WORLD); } // end if rank != 0 -> MPI } // End iteration printf("\nFinal Grid\n"); for(int i = 1; i<=DIM; i++) { for(int j = 1; j<=DIM; j++) { printf("%d ", h_grid[i*(DIM+2)+j]); } printf("\n"); } #pragma omp for private(i,j) for (int i = 1; i <= DIM; i++){ for (int j =1 ; j <= DIM; j++){ alive += h_grid[i*(DIM+2)+j]; } } // end of prama printf("There are %d cells alive after the last iteration\n", alive); // Release memory free(h_grid); hipFree(d_tempSub); return 1; } }
294f567cff73a46055857c391553bef7f7b69ef7.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <omp.h> #define RANDVAL 1984 #define BLOCK_SIZE 16 #define DIM 4 // Linear dimension of our grid - not counting ghost cells // Create an array that stores the number of rows of the subGrid in each device __host__ void RowCount(int devCount,int *subGridSize){ for (int i = 0; i < devCount; i++){ if (DIM % devCount == 0) subGridSize[i] = DIM / devCount; else{ if (i == 0) // If it is not possible to divide the rows equally between the devices, the first GPU will receive more rows than the others subGridSize[i] = ((int) DIM / devCount) + DIM % devCount; else subGridSize[i] = (int) DIM / devCount; } } } // Return the number of rows that exist in the main grid before the first row of the actual subgrid __host__ int rowsBefore(int device, int *subGridSize){ if (device == 0) return 0; else if (device ==1) return subGridSize[0]; else return subGridSize[device-1] + rowsBefore(device-1, subGridSize); } __host__ int getLastRow(int device, int *subGridSize){ if (device == 0) return subGridSize[0]; else return subGridSize[device] + getLastRow(device-1, subGridSize); } __host__ void buildHaloCells(int *h_grid){ // Copy halo rows for (int j = 1; j<= DIM; j++){ // Copy first real row to last halo row h_grid[(DIM+1) * (DIM+2) + j] = h_grid[1 * (DIM+2) + j]; // Copy last real row to first halo row h_grid[j] = h_grid[DIM * (DIM+2) + j]; } // Copy halo columns for (int i = 0; i<= DIM+1; i++){ // Copy first real column to last halo column h_grid[i * (DIM+2) + (DIM+1)] = h_grid[i* (DIM+2) + 1]; // Copy last real column to first halo column h_grid[i * (DIM+2)] = h_grid[i * (DIM+2) + DIM]; } } __host__ void buildSubGrid(int *h_grid, int *h_subGrid, int firstRow, int lastRow, int d){ for(int i = firstRow-1; i<= lastRow+1; i++){ for(int j = 0; j<= DIM+1; j++){ h_subGrid[i * (DIM+2) + j] = h_grid[i * (DIM+2) + j]; } } } __global__ void ghostRows(int *grid){ // We want id ∈ [1,DIM] int id = blockDim.x * blockIdx.x + threadIdx.x + 1; if (id <= DIM){ //Copy first real row to bottom ghost row grid[(DIM+2)*(DIM+1)+id] = grid[(DIM+2)+id]; //Copy last real row to top ghost row grid[id] = grid[(DIM+2)*DIM + id]; } } __global__ void ghostCols(int *grid){ // We want id ∈ [0,DIM+1] int id = blockDim.x * blockIdx.x + threadIdx.x; if (id <= DIM+1){ //Copy first real column to right most ghost column grid[id*(DIM+2)+DIM+1] = grid[id*(DIM+2)+1]; //Copy last real column to left most ghost column grid[id*(DIM+2)] = grid[id*(DIM+2) + DIM]; } } __global__ void GOL(int *grid, int *newGrid, int firstRow){ // We want id ∈ [1,DIM] int iy = blockDim.y * blockIdx.y + threadIdx.y + firstRow; int ix = blockDim.x * blockIdx.x + threadIdx.x + 1; int id = iy * (DIM+2) + ix; int numNeighbors; if (iy <= DIM && ix <= DIM) { // Get the number of neighbors for a given grid point numNeighbors = grid[id+(DIM+2)] + grid[id-(DIM+2)] //upper lower + grid[id+1] + grid[id-1] //right left + grid[id+(DIM+3)] + grid[id-(DIM+3)] //diagonals + grid[id-(DIM+1)] + grid[id+(DIM+1)]; int cell = grid[id]; //printf("firstrow: %d ID: %d Grid[%d]: %d cell: %d Neighboors: %d \n", firstRow, id, id, grid[id], cell ,numNeighbors); // Here we have explicitly all of the game rules if (cell == 1 && numNeighbors < 2) newGrid[id] = 0; else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) newGrid[id] = 1; else if (cell == 1 && numNeighbors > 3) newGrid[id] = 0; else if (cell == 0 && numNeighbors == 3) newGrid[id] = 1; else newGrid[id] = cell; } } int main(int argc, char* argv[]){ int devCount; 
cudaGetDeviceCount(&devCount); // Get the number of devices that the system have printf("There are %d devices \n", devCount); // If there is no GPU, it is not possible to run this version of Game of Life if (devCount == 0){ printf("There are no devices in this machine!"); return 0; // if there is no GPU, then break the code } int i, j, iter; int alive = 0, lim = DIM; int *h_grid; size_t gridBytes; gridBytes = sizeof(int)*(DIM+2)*(DIM+2); // 2 added for periodic boundary condition ghost cells // Alocate memory for host grid h_grid = (int*)malloc(gridBytes); srand(RANDVAL); // Assign random value to cells of the grid #pragma omp parallel for private(i,j) for(i = 1; i<=DIM; i++) { for(j = 1; j<=DIM; j++) { h_grid[i*(DIM+2)+j] = rand() % 2; } } // End of pragma printf("\nInitial Grid:\n"); for(int i = 1; i <= DIM; i++){ for(int j = 1; j <= DIM; j++){ printf("%d ", h_grid[i*(DIM+2)+j]); } printf("\n"); } dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE,1); int linGrid = (int)ceil(DIM/(float)BLOCK_SIZE); dim3 gridSize(linGrid,linGrid,1); dim3 cpyBlockSize(BLOCK_SIZE,1,1); dim3 cpyGridRowsGridSize((int)ceil(DIM/(float)cpyBlockSize.x),1,1); dim3 cpyGridColsGridSize((int)ceil((DIM+2)/(float)cpyBlockSize.x),1,1); if (devCount == 1){ int *d_grid, *d_newGrid, *d_tmpGrid; // Allocate device grids - if there is more than 1 thread, It'll allocate memory in each device cudaMalloc(&d_grid, gridBytes); cudaMalloc(&d_newGrid, gridBytes); // Copy over initial game grid (Dim-1 threads) cudaMemcpy(d_grid, h_grid, gridBytes, cudaMemcpyHostToDevice); for (iter = 0; iter < lim; iter ++){ ghostRows<<<cpyGridRowsGridSize, cpyBlockSize>>>(d_grid); ghostCols<<<cpyGridColsGridSize, cpyBlockSize>>>(d_grid); GOL<<<gridSize, blockSize>>>(d_grid, d_newGrid,1); // Swap our grids and iterate again d_tmpGrid = d_grid; d_grid = d_newGrid; d_newGrid = d_tmpGrid; } // Copy back results and sum cudaMemcpy(h_grid, d_grid, gridBytes, cudaMemcpyDeviceToHost); // calculate the total of cells alive after the iteractions //#pragma omp parallel for private(i,j,alive) for (i = 1; i <= DIM; i++){ for ( j =1 ; j <= DIM; j++){ alive += h_grid[i*(DIM+2)+j]; } } // end of prama printf("There are %d cells alive after the last iteration\n", alive); // Release memory cudaFree(d_grid); cudaFree(d_newGrid); free(h_grid); return 1; } if (devCount > 1){ int device, firstRow, lastRow, rank, size, processFirstRow; int *h_subGrid, *h_SubGridSize, *h_tempGrid; int *d_subGrid, *d_tempSub; size_t subBytes; MPI_Status status; h_SubGridSize = (int*)malloc(sizeof(int)*devCount); // Allocate memory for the subGridSize, which stores the number of elements in each subGrids RowCount(devCount, h_SubGridSize); // Calculate the size of the subgrid in each GPU for (iter = 0; iter < lim; iter ++){ if (rank == 0){ h_tempGrid = (int*)malloc(gridBytes); buildHaloCells(h_grid); MPI_Init(); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); processFirstRow = rowsBefore(rank, DIM); for (i = 1; i< size; i++){ MPI_Send(&h_grid[processFirstRow], DIM*DIM/size, MPI_INT, i, i, MPI_COMM_WORLD); // Send a fragment of h_grid for each process MPI_Recv(&h_grid[processFirstRow], DIM*DIM/size, MPI_INT, i, 99, MPI_COMM_WORLD, &status); // Receive the fragment that contains the resulting cells after the iteration } } else{ MPI_Recv(&h_grid, DIM*DIM/size, MPI_INT, 0, 99, MPI_COMM_WORLD, &status); #pragma omp for private(device, h_subGrid, subBytes, firstRow, lastRow) for (device = 0; device < devCount; device++){ cudaSetDevice(device); // Set device to be used subBytes = 
sizeof(int)* (DIM+2) * (DIM+2); //(h_SubGridSize[device]+2) * (DIM+2); number of rows + 2 halo/ghost rows + 2 halo/ghost columns h_subGrid = (int*)malloc(subBytes); //allocate memory for the subGrid // Allocate device grids - if there is more than 1 thread, It'll allocate memory in each device cudaMalloc(&d_subGrid, subBytes); cudaMalloc(&d_tempSub, subBytes); // Calculates the first row of the submatrix in the main matrix - Does not count the ghost rows firstRow = rowsBefore(device, h_SubGridSize) + 1; // Calculates the last row of the submatrix in the main matrix lastRow = getLastRow(device, h_SubGridSize); buildSubGrid(h_grid, h_subGrid, firstRow, lastRow, device); cudaMemcpy(d_subGrid, h_subGrid, subBytes, cudaMemcpyHostToDevice); // call GOL function and the new values will go to the d_tempSub grid GOL<<<gridSize, blockSize>>>(d_subGrid, d_tempSub, firstRow); free(h_subGrid); h_subGrid = (int*)malloc(subBytes); //allocate memory for the subGrid cudaMemcpy(h_subGrid, d_tempSub, subBytes, cudaMemcpyDeviceToHost); for(int i = firstRow; i <= lastRow; i++){ for(int j = 1; j <= DIM; j++){ h_tempGrid[i*(DIM+2)+j] = h_subGrid[i*(DIM+2)+j]; } } } // End pragma for (i = 1; i <= DIM; i++){ for (j = 1; j <= DIM; j++){ h_grid[i * (DIM+2) + j] = h_tempGrid[i * (DIM+2) + j]; } } printf("\niteration: %d (with ghost cells)\n", iter+1); for(i = 0; i <= DIM+1; i++){ for(j = 0; j <= DIM+1; j++){ printf("%d ", h_grid[i*(DIM+2)+j]); } printf("\n"); } } // end for Device MPI_Send(&h_grid, DIM*DIM/size, MPI_INT, 0, i, MPI_COMM_WORLD); } // end if rank != 0 -> MPI } // End iteration printf("\nFinal Grid\n"); for(int i = 1; i<=DIM; i++) { for(int j = 1; j<=DIM; j++) { printf("%d ", h_grid[i*(DIM+2)+j]); } printf("\n"); } #pragma omp for private(i,j) for (int i = 1; i <= DIM; i++){ for (int j =1 ; j <= DIM; j++){ alive += h_grid[i*(DIM+2)+j]; } } // end of prama printf("There are %d cells alive after the last iteration\n", alive); // Release memory free(h_grid); cudaFree(d_tempSub); return 1; } }
b2959ee39c0477a29dfb30f33ecebf2262bd082e.hip
// !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "hip/hip_runtime.h"

__global__ void foo(float* A)
{
    if(blockIdx.x == 0)
    {
        A[threadIdx.x] = 42.f;
    }
}
b2959ee39c0477a29dfb30f33ecebf2262bd082e.cu
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"

__global__ void foo(float* A)
{
    if(blockIdx.x == 0)
    {
        A[threadIdx.x] = 42.f;
    }
}