hip_filename   : string, length 5 to 84
hip_content    : string, length 79 to 9.69M
cuda_filename  : string, length 4 to 83
cuda_content   : string, length 19 to 9.69M
52835e2c97f6abd81f7a1174429b6610de565091.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "cudaFont.h" #include "cudaVector.h" #include "cudaOverlay.h" #include "cudaMappedMemory.h" #include "imageIO.h" #include "filesystem.h" #include "logging.h" #define STBTT_STATIC #define STB_TRUETYPE_IMPLEMENTATION #include "../image/stb/stb_truetype.h" //#define DEBUG_FONT // Struct for one character to render struct __align__(16) GlyphCommand { short x; // x coordinate origin in output image to begin drawing the glyph at short y; // y coordinate origin in output image to begin drawing the glyph at short u; // x texture coordinate in the baked font map where the glyph resides short v; // y texture coordinate in the baked font map where the glyph resides short width; // width of the glyph in pixels short height; // height of the glyph in pixels }; // adaptFontSize float adaptFontSize( uint32_t dimension ) { const float max_font = 32.0f; const float min_font = 28.0f; const uint32_t max_dim = 1536; const uint32_t min_dim = 768; if( dimension > max_dim ) dimension = max_dim; if( dimension < min_dim ) dimension = min_dim; const float dim_ratio = float(dimension - min_dim) / float(max_dim - min_dim); return min_font + dim_ratio * (max_font - min_font); } // constructor cudaFont::cudaFont() { mCommandCPU = NULL; mCommandGPU = NULL; mCmdIndex = 0; mFontMapCPU = NULL; mFontMapGPU = NULL; mRectsCPU = NULL; mRectsGPU = NULL; mRectIndex = 0; mFontMapWidth = 256; mFontMapHeight = 256; } // destructor cudaFont::~cudaFont() { if( mRectsCPU != NULL ) { CUDA(hipHostFree(mRectsCPU)); mRectsCPU = NULL; mRectsGPU = NULL; } if( mCommandCPU != NULL ) { CUDA(hipHostFree(mCommandCPU)); mCommandCPU = NULL; mCommandGPU = NULL; } if( mFontMapCPU != NULL ) { CUDA(hipHostFree(mFontMapCPU)); mFontMapCPU = NULL; mFontMapGPU = NULL; } } // Create cudaFont* cudaFont::Create( float size ) { // default fonts std::vector<std::string> fonts; fonts.push_back("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf"); fonts.push_back("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"); return Create(fonts, size); } // Create cudaFont* cudaFont::Create( const std::vector<std::string>& fonts, float size ) { const uint32_t numFonts = fonts.size(); for( uint32_t n=0; n < numFonts; n++ ) { cudaFont* font = Create(fonts[n].c_str(), size); if( font != NULL ) return font; } return NULL; } // Create cudaFont* cudaFont::Create( const char* 
font, float size ) { // verify parameters if( !font ) return Create(size); // create new font cudaFont* c = new cudaFont(); if( !c ) return NULL; if( !c->init(font, size) ) { delete c; return NULL; } return c; } // init bool cudaFont::init( const char* filename, float size ) { // validate parameters if( !filename ) return NULL; // verify that the font file exists and get its size const size_t ttf_size = fileSize(filename); if( !ttf_size ) { LogError(LOG_CUDA "font doesn't exist or empty file '%s'\n", filename); return false; } // allocate memory to store the font file void* ttf_buffer = malloc(ttf_size); if( !ttf_buffer ) { LogError(LOG_CUDA "failed to allocate %zu byte buffer for reading '%s'\n", ttf_size, filename); return false; } // open the font file FILE* ttf_file = fopen(filename, "rb"); if( !ttf_file ) { LogError(LOG_CUDA "failed to open '%s' for reading\n", filename); free(ttf_buffer); return false; } // read the font file const size_t ttf_read = fread(ttf_buffer, 1, ttf_size, ttf_file); fclose(ttf_file); if( ttf_read != ttf_size ) { LogError(LOG_CUDA "failed to read contents of '%s'\n", filename); LogError(LOG_CUDA "(read %zu bytes, expected %zu bytes)\n", ttf_read, ttf_size); free(ttf_buffer); return false; } // buffer that stores the coordinates of the baked glyphs stbtt_bakedchar bakeCoords[NumGlyphs]; // increase the size of the bitmap until all the glyphs fit while(true) { // allocate memory for the packed font texture (alpha only) const size_t fontMapSize = mFontMapWidth * mFontMapHeight * sizeof(unsigned char); if( !cudaAllocMapped((void**)&mFontMapCPU, (void**)&mFontMapGPU, fontMapSize) ) { LogError(LOG_CUDA "failed to allocate %zu bytes to store %ix%i font map\n", fontMapSize, mFontMapWidth, mFontMapHeight); free(ttf_buffer); return false; } // attempt to pack the bitmap const int result = stbtt_BakeFontBitmap((uint8_t*)ttf_buffer, 0, size, mFontMapCPU, mFontMapWidth, mFontMapHeight, FirstGlyph, NumGlyphs, bakeCoords); if( result == 0 ) { LogError(LOG_CUDA "failed to bake font bitmap '%s'\n", filename); free(ttf_buffer); return false; } else if( result < 0 ) { const int glyphsPacked = -result; if( glyphsPacked == NumGlyphs ) { LogVerbose(LOG_CUDA "packed %u glyphs in %ux%u bitmap (font size=%.0fpx)\n", NumGlyphs, mFontMapWidth, mFontMapHeight, size); break; } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "fit only %i of %u font glyphs in %ux%u bitmap\n", glyphsPacked, NumGlyphs, mFontMapWidth, mFontMapHeight); #endif CUDA(hipHostFree(mFontMapCPU)); mFontMapCPU = NULL; mFontMapGPU = NULL; mFontMapWidth *= 2; mFontMapHeight *= 2; #ifdef DEBUG_FONT LogDebug(LOG_CUDA "attempting to pack font with %ux%u bitmap...\n", mFontMapWidth, mFontMapHeight); #endif continue; } else { #ifdef DEBUG_FONT LogDebug(LOG_CUDA "packed %u glyphs in %ux%u bitmap (font size=%.0fpx)\n", NumGlyphs, mFontMapWidth, mFontMapHeight, size); #endif break; } } // free the TTF font data free(ttf_buffer); // store texture baking coordinates for( uint32_t n=0; n < NumGlyphs; n++ ) { mGlyphInfo[n].x = bakeCoords[n].x0; mGlyphInfo[n].y = bakeCoords[n].y0; mGlyphInfo[n].width = bakeCoords[n].x1 - bakeCoords[n].x0; mGlyphInfo[n].height = bakeCoords[n].y1 - bakeCoords[n].y0; mGlyphInfo[n].xAdvance = bakeCoords[n].xadvance; mGlyphInfo[n].xOffset = bakeCoords[n].xoff; mGlyphInfo[n].yOffset = bakeCoords[n].yoff; #ifdef DEBUG_FONT // debug info const char c = n + FirstGlyph; LogDebug("Glyph %u: '%c' width=%hu height=%hu xOffset=%.0f yOffset=%.0f xAdvance=%0.1f\n", n, c, mGlyphInfo[n].width, mGlyphInfo[n].height, 
mGlyphInfo[n].xOffset, mGlyphInfo[n].yOffset, mGlyphInfo[n].xAdvance); #endif } // allocate memory for GPU command buffer if( !cudaAllocMapped(&mCommandCPU, &mCommandGPU, sizeof(GlyphCommand) * MaxCommands) ) return false; // allocate memory for background rect buffers if( !cudaAllocMapped((void**)&mRectsCPU, (void**)&mRectsGPU, sizeof(float4) * MaxCommands) ) return false; return true; } /*inline __host__ __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }*/ inline __host__ __device__ float4 alpha_blend( const float4& bg, const float4& fg ) { const float alpha = fg.w / 255.0f; const float ialph = 1.0f - alpha; return make_float4(alpha * fg.x + ialph * bg.x, alpha * fg.y + ialph * bg.y, alpha * fg.z + ialph * bg.z, bg.w); } template<typename T> __global__ void gpuOverlayText( unsigned char* font, int fontWidth, GlyphCommand* commands, T* input, T* output, int imgWidth, int imgHeight, float4 color ) { const GlyphCommand cmd = commands[blockIdx.x]; if( threadIdx.x >= cmd.width || threadIdx.y >= cmd.height ) return; const int x = cmd.x + threadIdx.x; const int y = cmd.y + threadIdx.y; if( x < 0 || y < 0 || x >= imgWidth || y >= imgHeight ) return; const int u = cmd.u + threadIdx.x; const int v = cmd.v + threadIdx.y; const float px_glyph = font[v * fontWidth + u]; const float4 px_font = make_float4(px_glyph * color.x, px_glyph * color.y, px_glyph * color.z, px_glyph * color.w); const float4 px_in = cast_vec<float4>(input[y * imgWidth + x]); output[y * imgWidth + x] = cast_vec<T>(alpha_blend(px_in, px_font)); } // cudaOverlayText hipError_t cudaOverlayText( unsigned char* font, const int2& maxGlyphSize, size_t fontMapWidth, GlyphCommand* commands, size_t numCommands, const float4& fontColor, void* input, void* output, imageFormat format, size_t imgWidth, size_t imgHeight) { if( !font || !commands || !input || !output || numCommands == 0 || fontMapWidth == 0 || imgWidth == 0 || imgHeight == 0 ) return hipErrorInvalidValue; const float4 color_scaled = make_float4( fontColor.x / 255.0f, fontColor.y / 255.0f, fontColor.z / 255.0f, fontColor.w / 255.0f ); // setup arguments const dim3 block(maxGlyphSize.x, maxGlyphSize.y); const dim3 grid(numCommands); if( format == IMAGE_RGB8 ) hipLaunchKernelGGL(( gpuOverlayText<uchar3>), dim3(grid), dim3(block), 0, 0, font, fontMapWidth, commands, (uchar3*)input, (uchar3*)output, imgWidth, imgHeight, color_scaled); else if( format == IMAGE_RGBA8 ) hipLaunchKernelGGL(( gpuOverlayText<uchar4>), dim3(grid), dim3(block), 0, 0, font, fontMapWidth, commands, (uchar4*)input, (uchar4*)output, imgWidth, imgHeight, color_scaled); else if( format == IMAGE_RGB32F ) hipLaunchKernelGGL(( gpuOverlayText<float3>), dim3(grid), dim3(block), 0, 0, font, fontMapWidth, commands, (float3*)input, (float3*)output, imgWidth, imgHeight, color_scaled); else if( format == IMAGE_RGBA32F ) hipLaunchKernelGGL(( gpuOverlayText<float4>), dim3(grid), dim3(block), 0, 0, font, fontMapWidth, commands, (float4*)input, (float4*)output, imgWidth, imgHeight, color_scaled); else return hipErrorInvalidValue; return hipGetLastError(); } // Overlay bool cudaFont::OverlayText( void* image, imageFormat format, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& strings, const float4& color, const float4& bg_color, int bg_padding ) { const uint32_t numStrings = strings.size(); if( !image || width == 0 || height == 0 || numStrings == 0 ) return false; if( format != IMAGE_RGB8 && format != IMAGE_RGBA8 && format != 
IMAGE_RGB32F && format != IMAGE_RGBA32F ) { LogError(LOG_CUDA "cudaFont::OverlayText() -- unsupported image format (%s)\n", imageFormatToStr(format)); LogError(LOG_CUDA " supported formats are:\n"); LogError(LOG_CUDA " * rgb8\n"); LogError(LOG_CUDA " * rgba8\n"); LogError(LOG_CUDA " * rgb32f\n"); LogError(LOG_CUDA " * rgba32f\n"); return false; } const bool has_bg = bg_color.w > 0.0f; int2 maxGlyphSize = make_int2(0,0); int numCommands = 0; int numRects = 0; int maxChars = 0; // find the bg rects and total char count for( uint32_t s=0; s < numStrings; s++ ) maxChars += strings[s].first.size(); // reset the buffer indices if we need the space if( mCmdIndex + maxChars >= MaxCommands ) mCmdIndex = 0; if( has_bg && mRectIndex + numStrings >= MaxCommands ) mRectIndex = 0; // generate glyph commands and bg rects for( uint32_t s=0; s < numStrings; s++ ) { const uint32_t numChars = strings[s].first.size(); if( numChars == 0 ) continue; // determine the max 'height' of the string int maxHeight = 0; for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; const int yOffset = abs((int)mGlyphInfo[c].yOffset); if( maxHeight < yOffset ) maxHeight = yOffset; } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph height: %i\n", maxHeight); #endif // get the starting position of the string int2 pos = strings[s].second; if( pos.x < 0 ) pos.x = 0; if( pos.y < 0 ) pos.y = 0; pos.y += maxHeight; // reset the background rect if needed if( has_bg ) mRectsCPU[mRectIndex] = make_float4(width, height, 0, 0); // make a glyph command for each character for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; // make sure the character is in range if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; // rebase char against glyph 0 // fill the next command GlyphCommand* cmd = ((GlyphCommand*)mCommandCPU) + mCmdIndex + numCommands; cmd->x = pos.x; cmd->y = pos.y + mGlyphInfo[c].yOffset; cmd->u = mGlyphInfo[c].x; cmd->v = mGlyphInfo[c].y; cmd->width = mGlyphInfo[c].width; cmd->height = mGlyphInfo[c].height; // advance the text position pos.x += mGlyphInfo[c].xAdvance; // track the maximum glyph size if( maxGlyphSize.x < mGlyphInfo[n].width ) maxGlyphSize.x = mGlyphInfo[n].width; if( maxGlyphSize.y < mGlyphInfo[n].height ) maxGlyphSize.y = mGlyphInfo[n].height; // expand the background rect if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; if( cmd->x < rect->x ) rect->x = cmd->x; if( cmd->y < rect->y ) rect->y = cmd->y; const float x2 = cmd->x + cmd->width; const float y2 = cmd->y + cmd->height; if( x2 > rect->z ) rect->z = x2; if( y2 > rect->w ) rect->w = y2; } numCommands++; } if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; // apply padding rect->x -= bg_padding; rect->y -= bg_padding; rect->z += bg_padding; rect->w += bg_padding; numRects++; } } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph size is %ix%i\n", maxGlyphSize.x, maxGlyphSize.y); #endif // draw background rects if( has_bg && numRects > 0 ) CUDA(cudaRectFill(image, image, width, height, format, mRectsGPU + mRectIndex, numRects, bg_color)); // draw text characters CUDA(cudaOverlayText( mFontMapGPU, maxGlyphSize, mFontMapWidth, ((GlyphCommand*)mCommandGPU) + mCmdIndex, numCommands, color, image, image, format, width, height)); // advance the buffer indices mCmdIndex += numCommands; mRectIndex += numRects; return true; } // Overlay bool cudaFont::OverlayText( void* image, imageFormat format, uint32_t width, uint32_t height, const 
char* str, int x, int y, const float4& color, const float4& bg_color, int bg_padding ) { if( !str ) return NULL; std::vector< std::pair< std::string, int2 > > list; list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) )); return OverlayText(image, format, width, height, list, color, bg_color, bg_padding); } // TextExtents int4 cudaFont::TextExtents( const char* str, int x, int y ) { if( !str ) return make_int4(0,0,0,0); const size_t numChars = strlen(str); // determine the max 'height' of the string int maxHeight = 0; for( uint32_t n=0; n < numChars; n++ ) { char c = str[n]; if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; const int yOffset = abs((int)mGlyphInfo[c].yOffset); if( maxHeight < yOffset ) maxHeight = yOffset; } // get the starting position of the string int2 pos = make_int2(x,y); if( pos.x < 0 ) pos.x = 0; if( pos.y < 0 ) pos.y = 0; pos.y += maxHeight; // find the extents of the string for( uint32_t n=0; n < numChars; n++ ) { char c = str[n]; // make sure the character is in range if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; // rebase char against glyph 0 // advance the text position pos.x += mGlyphInfo[c].xAdvance; } return make_int4(x, y, pos.x, pos.y); } // Overlay bool cudaFont::OverlayText_edge_alig( void* image, imageFormat format, uint32_t width, uint32_t height, const char* str, int x, int y, const float4& color, const float4& bg_color, int bg_padding, float4* return_pos) { if( !str ) return NULL; std::vector< std::pair< std::string, int2 > > list; list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) )); return OverlayText_edge_alig(image, format, width, height, list, color, bg_color, bg_padding, return_pos); } // Overlay bool cudaFont::OverlayText_edge_alig( void* image, imageFormat format, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& strings, const float4& color, const float4& bg_color, int bg_padding, float4* return_pos) { const uint32_t numStrings = strings.size(); if( !image || width == 0 || height == 0 || numStrings == 0 ) return false; if( format != IMAGE_RGB8 && format != IMAGE_RGBA8 && format != IMAGE_RGB32F && format != IMAGE_RGBA32F ) { LogError(LOG_CUDA "cudaFont::OverlayText_edge_alig() -- unsupported image format (%s)\n", imageFormatToStr(format)); LogError(LOG_CUDA " supported formats are:\n"); LogError(LOG_CUDA " * rgb8\n"); LogError(LOG_CUDA " * rgba8\n"); LogError(LOG_CUDA " * rgb32f\n"); LogError(LOG_CUDA " * rgba32f\n"); return false; } const bool has_bg = bg_color.w > 0.0f; int2 maxGlyphSize = make_int2(0,0); int numCommands = 0; int numRects = 0; int maxChars = 0; int rerun_flag = 2; // find the bg rects and total char count for( uint32_t s=0; s < numStrings; s++ ) maxChars += strings[s].first.size(); // reset the buffer indices if we need the space if( mCmdIndex + maxChars >= MaxCommands ) mCmdIndex = 0; if( has_bg && mRectIndex + numStrings >= MaxCommands ) mRectIndex = 0; // generate glyph commands and bg rects for( uint32_t s=0; s < numStrings; s++ ) { const uint32_t numChars = strings[s].first.size(); if( numChars == 0 ) continue; // determine the max 'height' of the string int maxHeight = 0; for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; const int yOffset = abs((int)mGlyphInfo[c].yOffset); if( maxHeight < yOffset ) maxHeight = yOffset; } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph height: %i\n", maxHeight); #endif int2 pos_head = 
strings[s].second; int record_numCommands = numCommands; int record_numRects = numRects; while(rerun_flag > 0){ numCommands = record_numCommands; numRects = record_numRects; // get the starting position of the string int2 pos = strings[s].second; pos.x = pos_head.x; if( pos.x < 0 ) pos.x = 0; if( pos.y < 0 ) pos.y = 0; pos.y += maxHeight; // reset the background rect if needed if( has_bg ) mRectsCPU[mRectIndex] = make_float4(width, height, 0, 0); // make a glyph command for each character for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; // make sure the character is in range if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; // rebase char against glyph 0 // fill the next command GlyphCommand* cmd = ((GlyphCommand*)mCommandCPU) + mCmdIndex + numCommands; cmd->x = pos.x; cmd->y = pos.y + mGlyphInfo[c].yOffset; cmd->u = mGlyphInfo[c].x; cmd->v = mGlyphInfo[c].y; cmd->width = mGlyphInfo[c].width; cmd->height = mGlyphInfo[c].height; // advance the text position pos.x += mGlyphInfo[c].xAdvance; // track the maximum glyph size if( maxGlyphSize.x < mGlyphInfo[n].width ) maxGlyphSize.x = mGlyphInfo[n].width; if( maxGlyphSize.y < mGlyphInfo[n].height ) maxGlyphSize.y = mGlyphInfo[n].height; // expand the background rect if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; if( cmd->x < rect->x ) rect->x = cmd->x; if( cmd->y < rect->y ) rect->y = cmd->y; const float x2 = cmd->x + cmd->width; const float y2 = cmd->y + cmd->height; if( x2 > rect->z ) rect->z = x2; if( y2 > rect->w ) rect->w = y2; } numCommands++; } float4* rect_check = mRectsCPU + mRectIndex + numRects; if(rerun_flag == 2 && rect_check->z > width){ pos_head = strings[s].second; pos_head.x = pos_head.x - (rect_check->z - width); if(pos_head.x < 0) pos_head.x = 0; rerun_flag --; }else{ rerun_flag = 0; } return_pos->x = rect_check->x; return_pos->y = rect_check->y; return_pos->z = rect_check->z; return_pos->w = rect_check->w; if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; // apply padding rect->x -= bg_padding; rect->y -= bg_padding; rect->z += bg_padding; rect->w += bg_padding; numRects++; } } } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph size is %ix%i\n", maxGlyphSize.x, maxGlyphSize.y); #endif // draw background rects if( has_bg && numRects > 0 ) CUDA(cudaRectFill(image, image, width, height, format, mRectsGPU + mRectIndex, numRects, bg_color)); // draw text characters CUDA(cudaOverlayText( mFontMapGPU, maxGlyphSize, mFontMapWidth, ((GlyphCommand*)mCommandGPU) + mCmdIndex, numCommands, color, image, image, format, width, height)); // advance the buffer indices mCmdIndex += numCommands; mRectIndex += numRects; return true; }
52835e2c97f6abd81f7a1174429b6610de565091.cu
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "cudaFont.h" #include "cudaVector.h" #include "cudaOverlay.h" #include "cudaMappedMemory.h" #include "imageIO.h" #include "filesystem.h" #include "logging.h" #define STBTT_STATIC #define STB_TRUETYPE_IMPLEMENTATION #include "../image/stb/stb_truetype.h" //#define DEBUG_FONT // Struct for one character to render struct __align__(16) GlyphCommand { short x; // x coordinate origin in output image to begin drawing the glyph at short y; // y coordinate origin in output image to begin drawing the glyph at short u; // x texture coordinate in the baked font map where the glyph resides short v; // y texture coordinate in the baked font map where the glyph resides short width; // width of the glyph in pixels short height; // height of the glyph in pixels }; // adaptFontSize float adaptFontSize( uint32_t dimension ) { const float max_font = 32.0f; const float min_font = 28.0f; const uint32_t max_dim = 1536; const uint32_t min_dim = 768; if( dimension > max_dim ) dimension = max_dim; if( dimension < min_dim ) dimension = min_dim; const float dim_ratio = float(dimension - min_dim) / float(max_dim - min_dim); return min_font + dim_ratio * (max_font - min_font); } // constructor cudaFont::cudaFont() { mCommandCPU = NULL; mCommandGPU = NULL; mCmdIndex = 0; mFontMapCPU = NULL; mFontMapGPU = NULL; mRectsCPU = NULL; mRectsGPU = NULL; mRectIndex = 0; mFontMapWidth = 256; mFontMapHeight = 256; } // destructor cudaFont::~cudaFont() { if( mRectsCPU != NULL ) { CUDA(cudaFreeHost(mRectsCPU)); mRectsCPU = NULL; mRectsGPU = NULL; } if( mCommandCPU != NULL ) { CUDA(cudaFreeHost(mCommandCPU)); mCommandCPU = NULL; mCommandGPU = NULL; } if( mFontMapCPU != NULL ) { CUDA(cudaFreeHost(mFontMapCPU)); mFontMapCPU = NULL; mFontMapGPU = NULL; } } // Create cudaFont* cudaFont::Create( float size ) { // default fonts std::vector<std::string> fonts; fonts.push_back("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf"); fonts.push_back("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"); return Create(fonts, size); } // Create cudaFont* cudaFont::Create( const std::vector<std::string>& fonts, float size ) { const uint32_t numFonts = fonts.size(); for( uint32_t n=0; n < numFonts; n++ ) { cudaFont* font = Create(fonts[n].c_str(), size); if( font != NULL ) return font; } return NULL; } // Create cudaFont* cudaFont::Create( const char* font, float size ) { // verify parameters if( !font ) return Create(size); // create 
new font cudaFont* c = new cudaFont(); if( !c ) return NULL; if( !c->init(font, size) ) { delete c; return NULL; } return c; } // init bool cudaFont::init( const char* filename, float size ) { // validate parameters if( !filename ) return NULL; // verify that the font file exists and get its size const size_t ttf_size = fileSize(filename); if( !ttf_size ) { LogError(LOG_CUDA "font doesn't exist or empty file '%s'\n", filename); return false; } // allocate memory to store the font file void* ttf_buffer = malloc(ttf_size); if( !ttf_buffer ) { LogError(LOG_CUDA "failed to allocate %zu byte buffer for reading '%s'\n", ttf_size, filename); return false; } // open the font file FILE* ttf_file = fopen(filename, "rb"); if( !ttf_file ) { LogError(LOG_CUDA "failed to open '%s' for reading\n", filename); free(ttf_buffer); return false; } // read the font file const size_t ttf_read = fread(ttf_buffer, 1, ttf_size, ttf_file); fclose(ttf_file); if( ttf_read != ttf_size ) { LogError(LOG_CUDA "failed to read contents of '%s'\n", filename); LogError(LOG_CUDA "(read %zu bytes, expected %zu bytes)\n", ttf_read, ttf_size); free(ttf_buffer); return false; } // buffer that stores the coordinates of the baked glyphs stbtt_bakedchar bakeCoords[NumGlyphs]; // increase the size of the bitmap until all the glyphs fit while(true) { // allocate memory for the packed font texture (alpha only) const size_t fontMapSize = mFontMapWidth * mFontMapHeight * sizeof(unsigned char); if( !cudaAllocMapped((void**)&mFontMapCPU, (void**)&mFontMapGPU, fontMapSize) ) { LogError(LOG_CUDA "failed to allocate %zu bytes to store %ix%i font map\n", fontMapSize, mFontMapWidth, mFontMapHeight); free(ttf_buffer); return false; } // attempt to pack the bitmap const int result = stbtt_BakeFontBitmap((uint8_t*)ttf_buffer, 0, size, mFontMapCPU, mFontMapWidth, mFontMapHeight, FirstGlyph, NumGlyphs, bakeCoords); if( result == 0 ) { LogError(LOG_CUDA "failed to bake font bitmap '%s'\n", filename); free(ttf_buffer); return false; } else if( result < 0 ) { const int glyphsPacked = -result; if( glyphsPacked == NumGlyphs ) { LogVerbose(LOG_CUDA "packed %u glyphs in %ux%u bitmap (font size=%.0fpx)\n", NumGlyphs, mFontMapWidth, mFontMapHeight, size); break; } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "fit only %i of %u font glyphs in %ux%u bitmap\n", glyphsPacked, NumGlyphs, mFontMapWidth, mFontMapHeight); #endif CUDA(cudaFreeHost(mFontMapCPU)); mFontMapCPU = NULL; mFontMapGPU = NULL; mFontMapWidth *= 2; mFontMapHeight *= 2; #ifdef DEBUG_FONT LogDebug(LOG_CUDA "attempting to pack font with %ux%u bitmap...\n", mFontMapWidth, mFontMapHeight); #endif continue; } else { #ifdef DEBUG_FONT LogDebug(LOG_CUDA "packed %u glyphs in %ux%u bitmap (font size=%.0fpx)\n", NumGlyphs, mFontMapWidth, mFontMapHeight, size); #endif break; } } // free the TTF font data free(ttf_buffer); // store texture baking coordinates for( uint32_t n=0; n < NumGlyphs; n++ ) { mGlyphInfo[n].x = bakeCoords[n].x0; mGlyphInfo[n].y = bakeCoords[n].y0; mGlyphInfo[n].width = bakeCoords[n].x1 - bakeCoords[n].x0; mGlyphInfo[n].height = bakeCoords[n].y1 - bakeCoords[n].y0; mGlyphInfo[n].xAdvance = bakeCoords[n].xadvance; mGlyphInfo[n].xOffset = bakeCoords[n].xoff; mGlyphInfo[n].yOffset = bakeCoords[n].yoff; #ifdef DEBUG_FONT // debug info const char c = n + FirstGlyph; LogDebug("Glyph %u: '%c' width=%hu height=%hu xOffset=%.0f yOffset=%.0f xAdvance=%0.1f\n", n, c, mGlyphInfo[n].width, mGlyphInfo[n].height, mGlyphInfo[n].xOffset, mGlyphInfo[n].yOffset, mGlyphInfo[n].xAdvance); #endif } // allocate memory 
for GPU command buffer if( !cudaAllocMapped(&mCommandCPU, &mCommandGPU, sizeof(GlyphCommand) * MaxCommands) ) return false; // allocate memory for background rect buffers if( !cudaAllocMapped((void**)&mRectsCPU, (void**)&mRectsGPU, sizeof(float4) * MaxCommands) ) return false; return true; } /*inline __host__ __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }*/ inline __host__ __device__ float4 alpha_blend( const float4& bg, const float4& fg ) { const float alpha = fg.w / 255.0f; const float ialph = 1.0f - alpha; return make_float4(alpha * fg.x + ialph * bg.x, alpha * fg.y + ialph * bg.y, alpha * fg.z + ialph * bg.z, bg.w); } template<typename T> __global__ void gpuOverlayText( unsigned char* font, int fontWidth, GlyphCommand* commands, T* input, T* output, int imgWidth, int imgHeight, float4 color ) { const GlyphCommand cmd = commands[blockIdx.x]; if( threadIdx.x >= cmd.width || threadIdx.y >= cmd.height ) return; const int x = cmd.x + threadIdx.x; const int y = cmd.y + threadIdx.y; if( x < 0 || y < 0 || x >= imgWidth || y >= imgHeight ) return; const int u = cmd.u + threadIdx.x; const int v = cmd.v + threadIdx.y; const float px_glyph = font[v * fontWidth + u]; const float4 px_font = make_float4(px_glyph * color.x, px_glyph * color.y, px_glyph * color.z, px_glyph * color.w); const float4 px_in = cast_vec<float4>(input[y * imgWidth + x]); output[y * imgWidth + x] = cast_vec<T>(alpha_blend(px_in, px_font)); } // cudaOverlayText cudaError_t cudaOverlayText( unsigned char* font, const int2& maxGlyphSize, size_t fontMapWidth, GlyphCommand* commands, size_t numCommands, const float4& fontColor, void* input, void* output, imageFormat format, size_t imgWidth, size_t imgHeight) { if( !font || !commands || !input || !output || numCommands == 0 || fontMapWidth == 0 || imgWidth == 0 || imgHeight == 0 ) return cudaErrorInvalidValue; const float4 color_scaled = make_float4( fontColor.x / 255.0f, fontColor.y / 255.0f, fontColor.z / 255.0f, fontColor.w / 255.0f ); // setup arguments const dim3 block(maxGlyphSize.x, maxGlyphSize.y); const dim3 grid(numCommands); if( format == IMAGE_RGB8 ) gpuOverlayText<uchar3><<<grid, block>>>(font, fontMapWidth, commands, (uchar3*)input, (uchar3*)output, imgWidth, imgHeight, color_scaled); else if( format == IMAGE_RGBA8 ) gpuOverlayText<uchar4><<<grid, block>>>(font, fontMapWidth, commands, (uchar4*)input, (uchar4*)output, imgWidth, imgHeight, color_scaled); else if( format == IMAGE_RGB32F ) gpuOverlayText<float3><<<grid, block>>>(font, fontMapWidth, commands, (float3*)input, (float3*)output, imgWidth, imgHeight, color_scaled); else if( format == IMAGE_RGBA32F ) gpuOverlayText<float4><<<grid, block>>>(font, fontMapWidth, commands, (float4*)input, (float4*)output, imgWidth, imgHeight, color_scaled); else return cudaErrorInvalidValue; return cudaGetLastError(); } // Overlay bool cudaFont::OverlayText( void* image, imageFormat format, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& strings, const float4& color, const float4& bg_color, int bg_padding ) { const uint32_t numStrings = strings.size(); if( !image || width == 0 || height == 0 || numStrings == 0 ) return false; if( format != IMAGE_RGB8 && format != IMAGE_RGBA8 && format != IMAGE_RGB32F && format != IMAGE_RGBA32F ) { LogError(LOG_CUDA "cudaFont::OverlayText() -- unsupported image format (%s)\n", imageFormatToStr(format)); LogError(LOG_CUDA " supported formats are:\n"); LogError(LOG_CUDA " * rgb8\n"); 
LogError(LOG_CUDA " * rgba8\n"); LogError(LOG_CUDA " * rgb32f\n"); LogError(LOG_CUDA " * rgba32f\n"); return false; } const bool has_bg = bg_color.w > 0.0f; int2 maxGlyphSize = make_int2(0,0); int numCommands = 0; int numRects = 0; int maxChars = 0; // find the bg rects and total char count for( uint32_t s=0; s < numStrings; s++ ) maxChars += strings[s].first.size(); // reset the buffer indices if we need the space if( mCmdIndex + maxChars >= MaxCommands ) mCmdIndex = 0; if( has_bg && mRectIndex + numStrings >= MaxCommands ) mRectIndex = 0; // generate glyph commands and bg rects for( uint32_t s=0; s < numStrings; s++ ) { const uint32_t numChars = strings[s].first.size(); if( numChars == 0 ) continue; // determine the max 'height' of the string int maxHeight = 0; for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; const int yOffset = abs((int)mGlyphInfo[c].yOffset); if( maxHeight < yOffset ) maxHeight = yOffset; } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph height: %i\n", maxHeight); #endif // get the starting position of the string int2 pos = strings[s].second; if( pos.x < 0 ) pos.x = 0; if( pos.y < 0 ) pos.y = 0; pos.y += maxHeight; // reset the background rect if needed if( has_bg ) mRectsCPU[mRectIndex] = make_float4(width, height, 0, 0); // make a glyph command for each character for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; // make sure the character is in range if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; // rebase char against glyph 0 // fill the next command GlyphCommand* cmd = ((GlyphCommand*)mCommandCPU) + mCmdIndex + numCommands; cmd->x = pos.x; cmd->y = pos.y + mGlyphInfo[c].yOffset; cmd->u = mGlyphInfo[c].x; cmd->v = mGlyphInfo[c].y; cmd->width = mGlyphInfo[c].width; cmd->height = mGlyphInfo[c].height; // advance the text position pos.x += mGlyphInfo[c].xAdvance; // track the maximum glyph size if( maxGlyphSize.x < mGlyphInfo[n].width ) maxGlyphSize.x = mGlyphInfo[n].width; if( maxGlyphSize.y < mGlyphInfo[n].height ) maxGlyphSize.y = mGlyphInfo[n].height; // expand the background rect if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; if( cmd->x < rect->x ) rect->x = cmd->x; if( cmd->y < rect->y ) rect->y = cmd->y; const float x2 = cmd->x + cmd->width; const float y2 = cmd->y + cmd->height; if( x2 > rect->z ) rect->z = x2; if( y2 > rect->w ) rect->w = y2; } numCommands++; } if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; // apply padding rect->x -= bg_padding; rect->y -= bg_padding; rect->z += bg_padding; rect->w += bg_padding; numRects++; } } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph size is %ix%i\n", maxGlyphSize.x, maxGlyphSize.y); #endif // draw background rects if( has_bg && numRects > 0 ) CUDA(cudaRectFill(image, image, width, height, format, mRectsGPU + mRectIndex, numRects, bg_color)); // draw text characters CUDA(cudaOverlayText( mFontMapGPU, maxGlyphSize, mFontMapWidth, ((GlyphCommand*)mCommandGPU) + mCmdIndex, numCommands, color, image, image, format, width, height)); // advance the buffer indices mCmdIndex += numCommands; mRectIndex += numRects; return true; } // Overlay bool cudaFont::OverlayText( void* image, imageFormat format, uint32_t width, uint32_t height, const char* str, int x, int y, const float4& color, const float4& bg_color, int bg_padding ) { if( !str ) return NULL; std::vector< std::pair< std::string, int2 > > list; list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) 
)); return OverlayText(image, format, width, height, list, color, bg_color, bg_padding); } // TextExtents int4 cudaFont::TextExtents( const char* str, int x, int y ) { if( !str ) return make_int4(0,0,0,0); const size_t numChars = strlen(str); // determine the max 'height' of the string int maxHeight = 0; for( uint32_t n=0; n < numChars; n++ ) { char c = str[n]; if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; const int yOffset = abs((int)mGlyphInfo[c].yOffset); if( maxHeight < yOffset ) maxHeight = yOffset; } // get the starting position of the string int2 pos = make_int2(x,y); if( pos.x < 0 ) pos.x = 0; if( pos.y < 0 ) pos.y = 0; pos.y += maxHeight; // find the extents of the string for( uint32_t n=0; n < numChars; n++ ) { char c = str[n]; // make sure the character is in range if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; // rebase char against glyph 0 // advance the text position pos.x += mGlyphInfo[c].xAdvance; } return make_int4(x, y, pos.x, pos.y); } // Overlay bool cudaFont::OverlayText_edge_alig( void* image, imageFormat format, uint32_t width, uint32_t height, const char* str, int x, int y, const float4& color, const float4& bg_color, int bg_padding, float4* return_pos) { if( !str ) return NULL; std::vector< std::pair< std::string, int2 > > list; list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) )); return OverlayText_edge_alig(image, format, width, height, list, color, bg_color, bg_padding, return_pos); } // Overlay bool cudaFont::OverlayText_edge_alig( void* image, imageFormat format, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& strings, const float4& color, const float4& bg_color, int bg_padding, float4* return_pos) { const uint32_t numStrings = strings.size(); if( !image || width == 0 || height == 0 || numStrings == 0 ) return false; if( format != IMAGE_RGB8 && format != IMAGE_RGBA8 && format != IMAGE_RGB32F && format != IMAGE_RGBA32F ) { LogError(LOG_CUDA "cudaFont::OverlayText_edge_alig() -- unsupported image format (%s)\n", imageFormatToStr(format)); LogError(LOG_CUDA " supported formats are:\n"); LogError(LOG_CUDA " * rgb8\n"); LogError(LOG_CUDA " * rgba8\n"); LogError(LOG_CUDA " * rgb32f\n"); LogError(LOG_CUDA " * rgba32f\n"); return false; } const bool has_bg = bg_color.w > 0.0f; int2 maxGlyphSize = make_int2(0,0); int numCommands = 0; int numRects = 0; int maxChars = 0; int rerun_flag = 2; // find the bg rects and total char count for( uint32_t s=0; s < numStrings; s++ ) maxChars += strings[s].first.size(); // reset the buffer indices if we need the space if( mCmdIndex + maxChars >= MaxCommands ) mCmdIndex = 0; if( has_bg && mRectIndex + numStrings >= MaxCommands ) mRectIndex = 0; // generate glyph commands and bg rects for( uint32_t s=0; s < numStrings; s++ ) { const uint32_t numChars = strings[s].first.size(); if( numChars == 0 ) continue; // determine the max 'height' of the string int maxHeight = 0; for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; const int yOffset = abs((int)mGlyphInfo[c].yOffset); if( maxHeight < yOffset ) maxHeight = yOffset; } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph height: %i\n", maxHeight); #endif int2 pos_head = strings[s].second; int record_numCommands = numCommands; int record_numRects = numRects; while(rerun_flag > 0){ numCommands = record_numCommands; numRects = record_numRects; // get the starting position of the string int2 pos = strings[s].second; 
pos.x = pos_head.x; if( pos.x < 0 ) pos.x = 0; if( pos.y < 0 ) pos.y = 0; pos.y += maxHeight; // reset the background rect if needed if( has_bg ) mRectsCPU[mRectIndex] = make_float4(width, height, 0, 0); // make a glyph command for each character for( uint32_t n=0; n < numChars; n++ ) { char c = strings[s].first[n]; // make sure the character is in range if( c < FirstGlyph || c > LastGlyph ) continue; c -= FirstGlyph; // rebase char against glyph 0 // fill the next command GlyphCommand* cmd = ((GlyphCommand*)mCommandCPU) + mCmdIndex + numCommands; cmd->x = pos.x; cmd->y = pos.y + mGlyphInfo[c].yOffset; cmd->u = mGlyphInfo[c].x; cmd->v = mGlyphInfo[c].y; cmd->width = mGlyphInfo[c].width; cmd->height = mGlyphInfo[c].height; // advance the text position pos.x += mGlyphInfo[c].xAdvance; // track the maximum glyph size if( maxGlyphSize.x < mGlyphInfo[n].width ) maxGlyphSize.x = mGlyphInfo[n].width; if( maxGlyphSize.y < mGlyphInfo[n].height ) maxGlyphSize.y = mGlyphInfo[n].height; // expand the background rect if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; if( cmd->x < rect->x ) rect->x = cmd->x; if( cmd->y < rect->y ) rect->y = cmd->y; const float x2 = cmd->x + cmd->width; const float y2 = cmd->y + cmd->height; if( x2 > rect->z ) rect->z = x2; if( y2 > rect->w ) rect->w = y2; } numCommands++; } float4* rect_check = mRectsCPU + mRectIndex + numRects; if(rerun_flag == 2 && rect_check->z > width){ pos_head = strings[s].second; pos_head.x = pos_head.x - (rect_check->z - width); if(pos_head.x < 0) pos_head.x = 0; rerun_flag --; }else{ rerun_flag = 0; } return_pos->x = rect_check->x; return_pos->y = rect_check->y; return_pos->z = rect_check->z; return_pos->w = rect_check->w; if( has_bg ) { float4* rect = mRectsCPU + mRectIndex + numRects; // apply padding rect->x -= bg_padding; rect->y -= bg_padding; rect->z += bg_padding; rect->w += bg_padding; numRects++; } } } #ifdef DEBUG_FONT LogDebug(LOG_CUDA "max glyph size is %ix%i\n", maxGlyphSize.x, maxGlyphSize.y); #endif // draw background rects if( has_bg && numRects > 0 ) CUDA(cudaRectFill(image, image, width, height, format, mRectsGPU + mRectIndex, numRects, bg_color)); // draw text characters CUDA(cudaOverlayText( mFontMapGPU, maxGlyphSize, mFontMapWidth, ((GlyphCommand*)mCommandGPU) + mCmdIndex, numCommands, color, image, image, format, width, height)); // advance the buffer indices mCmdIndex += numCommands; mRectIndex += numRects; return true; }
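The pair above (cudaFont and its hipify output) shows the main transformation this dataset captures: triple-chevron kernel launches become hipLaunchKernelGGL calls, and CUDA runtime symbols are renamed (cudaFreeHost becomes hipHostFree, cudaError_t becomes hipError_t, cudaGetLastError becomes hipGetLastError). The standalone sketch below condenses that mapping for illustration; it is not taken from the dataset, and scaleKernel with its arguments is a hypothetical example.

// Minimal sketch of the CUDA-to-HIP launch translation these rows illustrate.
// scaleKernel is hypothetical; only the launch/runtime syntax mirrors the dataset rows.
#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

int main()
{
    const int n = 1024;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));      // HIP side: hipMalloc
    cudaMemset(d_data, 0, n * sizeof(float));    // HIP side: hipMemset

    const dim3 block(256);
    const dim3 grid((n + block.x - 1) / block.x);

    // CUDA launch syntax, as it appears in the *.cu rows:
    scaleKernel<<<grid, block>>>(d_data, 2.0f, n);

    // Equivalent HIP launch emitted by hipify, as it appears in the *.hip rows:
    // hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_data, 2.0f, n);

    cudaDeviceSynchronize();                     // HIP side: hipDeviceSynchronize
    cudaFree(d_data);                            // HIP side: hipFree
    return 0;
}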
b38e9028dcc061361240535a1c6157c5e0979d1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include "atomics.cuh" #include "index.cuh" #define THREADS 1024 #define BLOCKS(N) (N + THREADS - 1) / THREADS auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); #define KERNEL_RUN(NAME, DIMS, N, ...) \ [&] { \ auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); \ switch (DIMS) { \ case 1: \ hipLaunchKernelGGL(( NAME<scalar_t, 1>), dim3(BLOCKS(N)), dim3(THREADS), 0, stream, __VA_ARGS__, N); \ break; \ case 2: \ hipLaunchKernelGGL(( NAME<scalar_t, 2>), dim3(BLOCKS(N)), dim3(THREADS), 0, stream, __VA_ARGS__, N); \ break; \ case 3: \ hipLaunchKernelGGL(( NAME<scalar_t, 3>), dim3(BLOCKS(N)), dim3(THREADS), 0, stream, __VA_ARGS__, N); \ break; \ default: \ hipLaunchKernelGGL(( NAME<scalar_t, -1>), dim3(BLOCKS(N)), dim3(THREADS), 0, stream, __VA_ARGS__, N); \ } \ }() template <typename scalar_t, int64_t Dims> __global__ void scatter_mul_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomMul(&out.data[outOffset], src.data[srcOffset]); } } void scatter_mul_cuda(at::Tensor src, at::Tensor index, at::Tensor out, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_mul_kernel", [&] { KERNEL_RUN(scatter_mul_kernel, index.dim(), index.numel(), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src), at::cuda::detail::getTensorInfo<int64_t, int64_t>(index), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out), dim); }); } template <typename scalar_t, int64_t Dims> __global__ void scatter_div_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomDiv(&out.data[outOffset], src.data[srcOffset]); } } void scatter_div_cuda(at::Tensor src, at::Tensor index, at::Tensor out, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_div_kernel", [&] { KERNEL_RUN(scatter_div_kernel, index.dim(), index.numel(), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src), at::cuda::detail::getTensorInfo<int64_t, int64_t>(index), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out), dim); }); } template <typename scalar_t, int64_t Dims> __global__ void arg_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, at::cuda::detail::TensorInfo<int64_t, int64_t> arg, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for 
(ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0, argOffset = 0; IndexToScatterOffsets4<scalar_t, scalar_t, int64_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset, arg, &argOffset); if (src.data[srcOffset] == out.data[outOffset]) { arg.data[argOffset] = (srcOffset / src.strides[dim]) % src.sizes[dim]; } } } template <typename scalar_t, int64_t Dims> __global__ void scatter_max_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomMax(&out.data[outOffset], src.data[srcOffset]); } } void scatter_max_cuda(at::Tensor src, at::Tensor index, at::Tensor out, at::Tensor arg, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_max_kernel", [&] { auto src_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src); auto index_info = at::cuda::detail::getTensorInfo<int64_t, int64_t>(index); auto out_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out); KERNEL_RUN(scatter_max_kernel, index.dim(), index.numel(), src_info, index_info, out_info, dim); KERNEL_RUN(arg_kernel, index.dim(), index.numel(), src_info, index_info, out_info, at::cuda::detail::getTensorInfo<int64_t, int64_t>(arg), dim); }); } template <typename scalar_t, int64_t Dims> __global__ void scatter_min_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomMin(&out.data[outOffset], src.data[srcOffset]); } } void scatter_min_cuda(at::Tensor src, at::Tensor index, at::Tensor out, at::Tensor arg, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_min_kernel", [&] { auto src_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src); auto index_info = at::cuda::detail::getTensorInfo<int64_t, int64_t>(index); auto out_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out); KERNEL_RUN(scatter_min_kernel, index.dim(), index.numel(), src_info, index_info, out_info, dim); KERNEL_RUN(arg_kernel, index.dim(), index.numel(), src_info, index_info, out_info, at::cuda::detail::getTensorInfo<int64_t, int64_t>(arg), dim); }); }
b38e9028dcc061361240535a1c6157c5e0979d1e.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include "atomics.cuh" #include "index.cuh" #define THREADS 1024 #define BLOCKS(N) (N + THREADS - 1) / THREADS auto stream = at::cuda::getCurrentCUDAStream(); #define KERNEL_RUN(NAME, DIMS, N, ...) \ [&] { \ auto stream = at::cuda::getCurrentCUDAStream(); \ switch (DIMS) { \ case 1: \ NAME<scalar_t, 1><<<BLOCKS(N), THREADS, 0, stream>>>(__VA_ARGS__, N); \ break; \ case 2: \ NAME<scalar_t, 2><<<BLOCKS(N), THREADS, 0, stream>>>(__VA_ARGS__, N); \ break; \ case 3: \ NAME<scalar_t, 3><<<BLOCKS(N), THREADS, 0, stream>>>(__VA_ARGS__, N); \ break; \ default: \ NAME<scalar_t, -1><<<BLOCKS(N), THREADS, 0, stream>>>(__VA_ARGS__, N); \ } \ }() template <typename scalar_t, int64_t Dims> __global__ void scatter_mul_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomMul(&out.data[outOffset], src.data[srcOffset]); } } void scatter_mul_cuda(at::Tensor src, at::Tensor index, at::Tensor out, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_mul_kernel", [&] { KERNEL_RUN(scatter_mul_kernel, index.dim(), index.numel(), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src), at::cuda::detail::getTensorInfo<int64_t, int64_t>(index), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out), dim); }); } template <typename scalar_t, int64_t Dims> __global__ void scatter_div_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomDiv(&out.data[outOffset], src.data[srcOffset]); } } void scatter_div_cuda(at::Tensor src, at::Tensor index, at::Tensor out, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_div_kernel", [&] { KERNEL_RUN(scatter_div_kernel, index.dim(), index.numel(), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src), at::cuda::detail::getTensorInfo<int64_t, int64_t>(index), at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out), dim); }); } template <typename scalar_t, int64_t Dims> __global__ void arg_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, at::cuda::detail::TensorInfo<int64_t, int64_t> arg, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0, argOffset = 0; IndexToScatterOffsets4<scalar_t, scalar_t, int64_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, 
&outOffset, arg, &argOffset); if (src.data[srcOffset] == out.data[outOffset]) { arg.data[argOffset] = (srcOffset / src.strides[dim]) % src.sizes[dim]; } } } template <typename scalar_t, int64_t Dims> __global__ void scatter_max_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomMax(&out.data[outOffset], src.data[srcOffset]); } } void scatter_max_cuda(at::Tensor src, at::Tensor index, at::Tensor out, at::Tensor arg, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_max_kernel", [&] { auto src_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src); auto index_info = at::cuda::detail::getTensorInfo<int64_t, int64_t>(index); auto out_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out); KERNEL_RUN(scatter_max_kernel, index.dim(), index.numel(), src_info, index_info, out_info, dim); KERNEL_RUN(arg_kernel, index.dim(), index.numel(), src_info, index_info, out_info, at::cuda::detail::getTensorInfo<int64_t, int64_t>(arg), dim); }); } template <typename scalar_t, int64_t Dims> __global__ void scatter_min_kernel(at::cuda::detail::TensorInfo<scalar_t, int64_t> src, at::cuda::detail::TensorInfo<int64_t, int64_t> index, at::cuda::detail::TensorInfo<scalar_t, int64_t> out, int64_t dim, size_t numel) { const size_t idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; for (ptrdiff_t i = idx; i < numel; i += stride) { int64_t srcOffset = 0, indexOffset = 0, outOffset = 0; IndexToScatterOffsets3<scalar_t, scalar_t, Dims>::compute( i, dim, index, &indexOffset, src, &srcOffset, out, &outOffset); atomMin(&out.data[outOffset], src.data[srcOffset]); } } void scatter_min_cuda(at::Tensor src, at::Tensor index, at::Tensor out, at::Tensor arg, int64_t dim) { AT_DISPATCH_ALL_TYPES(src.scalar_type(), "scatter_min_kernel", [&] { auto src_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(src); auto index_info = at::cuda::detail::getTensorInfo<int64_t, int64_t>(index); auto out_info = at::cuda::detail::getTensorInfo<scalar_t, int64_t>(out); KERNEL_RUN(scatter_min_kernel, index.dim(), index.numel(), src_info, index_info, out_info, dim); KERNEL_RUN(arg_kernel, index.dim(), index.numel(), src_info, index_info, out_info, at::cuda::detail::getTensorInfo<int64_t, int64_t>(arg), dim); }); }
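The scatter pair above dispatches every kernel through a grid-stride loop so that a fixed THREADS/BLOCKS(N) launch covers tensors of any size. Below is a minimal, self-contained sketch of that pattern, written for illustration rather than taken from the dataset: scatter_add_1d is a hypothetical stand-in, and atomicAdd replaces the custom atomMul/atomMax/atomMin helpers that the real kernels pull from atomics.cuh.

// Minimal grid-stride scatter sketch (assumes 1-D float data with int64_t indices).
#include <cstdint>
#include <cuda_runtime.h>

__global__ void scatter_add_1d(const float* src, const int64_t* index,
                               float* out, size_t numel)
{
    const size_t idx    = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t stride = blockDim.x * gridDim.x;

    // Grid-stride loop: each thread handles every stride-th element,
    // so one launch covers any numel regardless of grid size.
    for (size_t i = idx; i < numel; i += stride)
        atomicAdd(&out[index[i]], src[i]);   // atomics resolve duplicate indices
}

int main()
{
    const size_t n = 1 << 20;
    float*   d_src   = nullptr;
    int64_t* d_index = nullptr;
    float*   d_out   = nullptr;
    cudaMalloc(&d_src,   n * sizeof(float));
    cudaMalloc(&d_index, n * sizeof(int64_t));
    cudaMalloc(&d_out,   n * sizeof(float));
    cudaMemset(d_src,   0, n * sizeof(float));    // real callers copy in their data
    cudaMemset(d_index, 0, n * sizeof(int64_t));  // zeroed indices keep the demo in bounds
    cudaMemset(d_out,   0, n * sizeof(float));

    const int threads = 1024;                                        // matches THREADS in the rows
    const int blocks  = static_cast<int>((n + threads - 1) / threads); // matches BLOCKS(N)
    scatter_add_1d<<<blocks, threads>>>(d_src, d_index, d_out, n);
    cudaDeviceSynchronize();

    cudaFree(d_src);
    cudaFree(d_index);
    cudaFree(d_out);
    return 0;
}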
29d9f905d2ff679247b7d63a0b4d12ee1e4c665f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file BounceBackNVEGPU.cu * \brief Template specialization of CUDA kernels for BounceBackNVEGPU geometries. Each instance of * the nve_bounce_step_one must be templated explicitly for each geometry. */ #include "BounceBackNVEGPU.cuh" #include "StreamingGeometry.h" namespace mpcd { namespace gpu { //! Template instantiation of slit geometry streaming template hipError_t nve_bounce_step_one<mpcd::detail::SlitGeometry>(const bounce_args_t& args, const mpcd::detail::SlitGeometry& geom); //! Template instantiation of slit pore geometry streaming template hipError_t nve_bounce_step_one<mpcd::detail::SlitPoreGeometry>(const bounce_args_t& args, const mpcd::detail::SlitPoreGeometry& geom); namespace kernel { //! Kernel for applying second step of velocity Verlet algorithm with bounce back /*! * \param d_vel Particle velocities * \param d_accel Particle accelerations * \param d_net_force Net force on each particle * \param d_group Indexes in particle group * \param dt Timestep * \param N Number of particles in group * * \b Implementation: * Using one thread per particle, the particle velocities are updated according to the second step * of the velocity Verlet algorithm. This is the standard update as in MD, and is only reimplemented * here in case future modifications are necessary. */ __global__ void nve_bounce_step_two(Scalar4* d_vel, Scalar3* d_accel, const Scalar4* d_net_force, const unsigned int* d_group, const Scalar dt, const unsigned int N) { // one thread per particle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; const unsigned int pid = d_group[idx]; const Scalar4 net_force = d_net_force[pid]; Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z); Scalar4 vel = d_vel[pid]; accel.x /= vel.w; accel.y /= vel.w; accel.z /= vel.w; // then, update the velocity vel.x += Scalar(0.5) * accel.x * dt; vel.y += Scalar(0.5) * accel.y * dt; vel.z += Scalar(0.5) * accel.z * dt; d_vel[pid] = vel; d_accel[pid] = accel; } } // end namespace kernel /*! * \param d_vel Particle velocities * \param d_accel Particle accelerations * \param d_net_force Net force on each particle * \param d_group Indexes in particle group * \param dt Timestep * \param N Number of particles in group * \param block_size Number of threads per block * * \sa kernel::nve_bounce_step_two */ hipError_t nve_bounce_step_two(Scalar4* d_vel, Scalar3* d_accel, const Scalar4* d_net_force, const unsigned int* d_group, const Scalar dt, const unsigned int N, const unsigned int block_size) { unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)kernel::nve_bounce_step_two); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N / run_block_size + 1); hipLaunchKernelGGL(( kernel::nve_bounce_step_two), dim3(grid), dim3(run_block_size), 0, 0, d_vel, d_accel, d_net_force, d_group, dt, N); return hipSuccess; } } // end namespace gpu } // end namespace mpcd
29d9f905d2ff679247b7d63a0b4d12ee1e4c665f.cu
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! * \file BounceBackNVEGPU.cu * \brief Template specialization of CUDA kernels for BounceBackNVEGPU geometries. Each instance of * the nve_bounce_step_one must be templated explicitly for each geometry. */ #include "BounceBackNVEGPU.cuh" #include "StreamingGeometry.h" namespace mpcd { namespace gpu { //! Template instantiation of slit geometry streaming template cudaError_t nve_bounce_step_one<mpcd::detail::SlitGeometry>(const bounce_args_t& args, const mpcd::detail::SlitGeometry& geom); //! Template instantiation of slit pore geometry streaming template cudaError_t nve_bounce_step_one<mpcd::detail::SlitPoreGeometry>(const bounce_args_t& args, const mpcd::detail::SlitPoreGeometry& geom); namespace kernel { //! Kernel for applying second step of velocity Verlet algorithm with bounce back /*! * \param d_vel Particle velocities * \param d_accel Particle accelerations * \param d_net_force Net force on each particle * \param d_group Indexes in particle group * \param dt Timestep * \param N Number of particles in group * * \b Implementation: * Using one thread per particle, the particle velocities are updated according to the second step * of the velocity Verlet algorithm. This is the standard update as in MD, and is only reimplemented * here in case future modifications are necessary. */ __global__ void nve_bounce_step_two(Scalar4* d_vel, Scalar3* d_accel, const Scalar4* d_net_force, const unsigned int* d_group, const Scalar dt, const unsigned int N) { // one thread per particle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; const unsigned int pid = d_group[idx]; const Scalar4 net_force = d_net_force[pid]; Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z); Scalar4 vel = d_vel[pid]; accel.x /= vel.w; accel.y /= vel.w; accel.z /= vel.w; // then, update the velocity vel.x += Scalar(0.5) * accel.x * dt; vel.y += Scalar(0.5) * accel.y * dt; vel.z += Scalar(0.5) * accel.z * dt; d_vel[pid] = vel; d_accel[pid] = accel; } } // end namespace kernel /*! * \param d_vel Particle velocities * \param d_accel Particle accelerations * \param d_net_force Net force on each particle * \param d_group Indexes in particle group * \param dt Timestep * \param N Number of particles in group * \param block_size Number of threads per block * * \sa kernel::nve_bounce_step_two */ cudaError_t nve_bounce_step_two(Scalar4* d_vel, Scalar3* d_accel, const Scalar4* d_net_force, const unsigned int* d_group, const Scalar dt, const unsigned int N, const unsigned int block_size) { unsigned int max_block_size; cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void*)kernel::nve_bounce_step_two); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); dim3 grid(N / run_block_size + 1); kernel::nve_bounce_step_two<<<grid, run_block_size>>>(d_vel, d_accel, d_net_force, d_group, dt, N); return cudaSuccess; } } // end namespace gpu } // end namespace mpcd
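// The .hip/.cu pair above differs only in runtime API prefixes and launch syntax. A condensed,
// hedged sketch of that mapping; toy_kernel and launch_toy are illustrative names, not part of
// either file, and the HIP equivalents are shown as comments next to the CUDA calls.
#include <cuda_runtime.h>
#include <algorithm>

__global__ void toy_kernel(float* data, unsigned int N)
{
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
        data[idx] *= 2.0f;
}

cudaError_t launch_toy(float* d_data, unsigned int N, unsigned int block_size)
{
    // Query the kernel's per-launch limit, clamp the requested block size, then launch.
    cudaFuncAttributes attr;                                   // HIP: hipFuncAttributes
    cudaFuncGetAttributes(&attr, (const void*)toy_kernel);     // HIP: hipFuncGetAttributes
    unsigned int run_block_size = std::min(block_size, (unsigned int)attr.maxThreadsPerBlock);

    dim3 grid(N / run_block_size + 1);
    toy_kernel<<<grid, run_block_size>>>(d_data, N);
    // HIP: hipLaunchKernelGGL(toy_kernel, grid, run_block_size, 0, 0, d_data, N);
    return cudaSuccess;                                        // HIP: hipSuccess
}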
70222ae22998222a6c4a23eef0a5b705bdd7c71d.hip
// !!! This is a file automatically generated by hipify!!! /** * (C) Copyright 2020 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "cuda_math_util.h" #include "pulsed_weight_updater.h" #include <hipcub/hipcub.hpp> #include <algorithm> #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "io_iterator.h" #include "pwu_kernel_parameter.h" #include "rpucuda_pulsed_device.h" namespace RPU { /****************************************************************************************************************/ /* PULSEDWEIGHTUPDATER */ /******************************************************************************************************************/ template <typename T> PulsedWeightUpdater<T>::PulsedWeightUpdater(CudaContext *c, int x_size, int d_size) : context_{c}, x_size_{x_size}, d_size_{d_size} { blm_ = make_unique<BitLineMaker<T>>(c, x_size, d_size); up_context_ = nullptr; is_async_update_ = false; }; template <typename T> pwukpvec_t<T> PulsedWeightUpdater<T>::getValidUpdateKernels( PulsedRPUDeviceCudaBase<T> *rpucuda_device, int m_batch, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; for (int use_bo64 : {1, 0}) { // omit 2 (ie bo64 translation) for (int out_trans : {true, false}) { pwukpvec_t<T> v2 = rpucuda_device->getUpdateKernels(m_batch, up.getNK32Default(), use_bo64, out_trans, up); for (int i = 0; i < v2.size(); i++) { if (v2[i]->isValid()) { v.push_back(v2[i]); } } } if (v.size() > 0 && (m_batch >= 1000)) { break; // prefer bo64 for large batch if possible } } return v; } template <typename T> void PulsedWeightUpdater<T>::makeUpdateAsync() { if (!is_async_update_) { is_async_update_ = true; up_context_ = make_unique<CudaContext>(context_->getGPUId()); } } template <typename T> void PulsedWeightUpdater<T>::waitForUpdateCalculations() { if (is_async_update_) { // use the up_context event for it because context_ might be shared context_->recordWaitEvent(up_context_->getStream(), up_context_->getEvent()); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::executeUpdate( pwukp_t<T> kpars, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { blm_->makeCounts( x_in, d_in, up, rpucuda_device->getDwMin(), lr, m_batch, x_trans_in, d_trans_in, kpars->getOutTrans(), kpars->getUseBo64()); CudaContext *c = context_; if (is_async_update_) { up_context_->recordWaitEvent(context_->getStream(), context_->getEvent()); c = &*up_context_; } rpucuda_device->runUpdateKernel( kpars, c, dev_weights, m_batch, &*blm_, up, c->getRandomStates(kpars->getNStates())); } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::tuneUpdate( pwukp_t<T> &opt_kernel_pars, pwukpvec_t<T> &v, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool 
x_trans_in, const bool d_trans_in) { bool is_async_update = is_async_update_; is_async_update_ = false; CUDA_TIMING_INIT; int nrepeats = 3; CudaArray<T> dev_tmp_weights(context_, x_size_ * d_size_); auto *tmp_device = rpucuda_device->clone(); PulsedUpdateMetaParameter<T> up_tuning(up); up_tuning._currently_tuning = true; dev_tmp_weights.assignFromDevice(dev_weights); context_->synchronizeDevice(); // maybe other streams exist. T min_timing = FLT_MAX; int min_i = 0; for (int k = 0; k < v.size(); k++) { CUDA_TIMING_START(*context_); for (int i = 0; i < nrepeats; i++) { this->executeUpdate( v[k], x_in, d_in, dev_tmp_weights.getData(), tmp_device, up_tuning, lr, m_batch, x_trans_in, d_trans_in); } CUDA_TIMING_STOP_NO_OUTPUT(*context_); v[k]->timing = milliseconds / nrepeats; if (v[k]->timing < min_timing) { min_timing = v[k]->timing; min_i = k; } } CUDA_TIMING_DESTROY; is_async_update_ = is_async_update; opt_kernel_pars = v[min_i]; delete tmp_device; DEBUG_OUT( "UpdateTuner: Using " << opt_kernel_pars->getName() << " for PWU [" << opt_kernel_pars->timing << "].\n\n"); DEBUG_CALL(opt_kernel_pars->print()); } template <typename T> template <typename InputIteratorT> const T *PulsedWeightUpdater<T>::copyIterator2Buffer( InputIteratorT vec, std::shared_ptr<CudaArray<T>> &buffer, int size) { if ((buffer == nullptr) || (buffer->getSize() < size)) { buffer = std::shared_ptr<CudaArray<T>>(new CudaArray<T>(context_, size)); } RPU::math::copyWithIterator(context_, buffer->getData(), vec, size); return buffer->getDataConst(); } template <> template <> const float *PulsedWeightUpdater<float>::copyIterator2Buffer( const float *vec, std::shared_ptr<CudaArray<float>> &buffer, int size) { return vec; } #ifdef RPU_USE_DOUBLE template <> template <> const double *PulsedWeightUpdater<double>::copyIterator2Buffer( const double *vec, std::shared_ptr<CudaArray<double>> &buffer, int size) { return vec; } #endif template <typename T> void PulsedWeightUpdater<T>::setSharedBuffer( int m_batch, std::shared_ptr<CudaArray<T>> x_buffer, std::shared_ptr<CudaArray<T>> d_buffer) { if (x_buffer) { dev_fpx_buffer_ = x_buffer; if (dev_fpx_buffer_->getSize() < m_batch * x_size_) { RPU_FATAL("X batch buffer size too small."); } } if (d_buffer) { dev_fpd_buffer_ = d_buffer; if (dev_fpd_buffer_->getSize() < m_batch * d_size_) { RPU_FATAL("D batch buffer size too small."); } } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doFPupdate( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (m_batch == 1 && beta == 1.0) { RPU::math::ger<T>(context_, d_size_, x_size_, -lr, d_out, 1, x_out, 1, dev_weights, d_size_); } else { RPU::math::gemm<T>( context_, d_trans, !x_trans, d_size_, // M x_size_, // N m_batch, // K -lr, d_out, d_trans ? m_batch : d_size_, x_out, x_trans ? 
m_batch : x_size_, beta, dev_weights, d_size_); } } template <typename T> bool PulsedWeightUpdater<T>::checkForFPUpdate( AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up) { return (up.pulse_type == PulseType::None) || (rpucuda_device_in == nullptr) || !rpucuda_device_in->isPulsedDevice() || (rpucuda_device_in->implements() == DeviceUpdateType::FloatingPoint); } #define FORCE_TUNING_THRES 0 template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::update( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans, const bool d_trans) { // FP update if no device is given if (checkForFPUpdate(rpucuda_device_in, up) || (up.pulse_type == PulseType::NoneWithDevice)) { doFPupdate(x_in, d_in, dev_weights, lr, m_batch, x_trans, d_trans); if (up.pulse_type == PulseType::NoneWithDevice) { // apply bounds rpucuda_device_in->clipWeights(dev_weights, -1.0); } return; } // safe because of isPulsedDevice PulsedRPUDeviceCudaBase<T> *rpucuda_device = static_cast<PulsedRPUDeviceCudaBase<T> *>(rpucuda_device_in); bool force_tuning = false; // check need for init (or re-init) DeviceUpdateType update_type = rpucuda_device->implements(); if (update_type != update_type_) //|| (!blm_->checkBuffer(m_batch,BL))) { // we do not check for change in x_size/d_size, but they are assumed to be constant as well! force_tuning = true; update_type_ = update_type; // init buffers update_count_ = 0; // init kernels valid_kernels_ = getValidUpdateKernels(rpucuda_device, m_batch, up); if (valid_kernels_.size() == 0) { RPU_FATAL("Cannot find valid update kernels"); } kernel_pars_ = valid_kernels_[0]; // this will be modified if tuned if (up._debug_kernel_index >= 0) { // set default for debugging // just get a valid kpars (will be overwritten if tuning is used below) force_tuning = false; int kidx = up._debug_kernel_index; if (up._debug_kernel_index >= valid_kernels_.size()) { std::cout << "DEBUG WARNING: kernel index out of range " << valid_kernels_.size() << std::endl; kidx = 0; } kernel_pars_ = valid_kernels_[kidx]; if (kernel_pars_->getUseBo64() == 1) { std::cout << "DEBUG WARNING: cannot test BO64 direct. 
Set to translate " << std::endl; kernel_pars_->forceBo64Translate(); } if (kidx == 0) { kernel_pars_->force32(); // debug hack: might break kernel in the worst case kernel_pars_->forceNonTrans(); // debug hack: might break kernel in the worst case std::cout << "DEBUG WARNING: Kernel index 0: FORCED 32 and non-trans" << std::endl; } std::cout << "Selected kernel index " << kidx << " out of " << valid_kernels_.size() << std::endl; kernel_pars_->print(); } } if (update_count_ < FORCE_TUNING_THRES) { // only once again update_count_ += 1; force_tuning = force_tuning || (update_count_ == FORCE_TUNING_THRES); } // tune if requested if (force_tuning) { this->tuneUpdate( kernel_pars_, valid_kernels_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } // do update this->executeUpdate( kernel_pars_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } #define RPU_PWU_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \ template void PulsedWeightUpdater<NUM_T>::update( \ XITERT, DITERT, NUM_T *, AbstractRPUDeviceCuda<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::doFPupdate( \ XITERT, DITERT, NUM_T *, const NUM_T, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::tuneUpdate( \ pwukp_t<NUM_T> &, pwukpvec_t<NUM_T> &, XITERT, DITERT, NUM_T *, \ PulsedRPUDeviceCudaBase<NUM_T> *, const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, \ const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::executeUpdate( \ pwukp_t<NUM_T>, XITERT, DITERT, NUM_T *, PulsedRPUDeviceCudaBase<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); #define TRANSFLOAT(TRANS) TRANS, float template class PulsedWeightUpdater<float>; RPU_PWU_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, const float *, const float *); RPU_PWU_ITER_TEMPLATE( float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>); #undef TRANSFLOAT #ifdef RPU_USE_DOUBLE #define TRANSDOUBLE(TRANS) TRANS, double template class PulsedWeightUpdater<double>; RPU_PWU_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, const double *, const double *); RPU_PWU_ITER_TEMPLATE( double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>); #undef TRANSDOUBLE #endif #undef RPU_PWU_ITER_TEMPLATE } // namespace RPU
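// tuneUpdate above times each candidate update kernel (via the project's CUDA_TIMING_* macros)
// and keeps the fastest one. A generic, hedged sketch of the same event-based timing loop;
// pick_fastest and the std::function candidates are illustrative stand-ins for executeUpdate and
// the pwukpvec_t kernel list.
#include <cuda_runtime.h>
#include <cfloat>
#include <functional>
#include <vector>

int pick_fastest(const std::vector<std::function<void()>>& candidates, int nrepeats = 3)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float best_ms = FLT_MAX;
    int best_idx = 0;
    for (int k = 0; k < (int)candidates.size(); ++k) {
        cudaEventRecord(start);
        for (int i = 0; i < nrepeats; ++i)
            candidates[k]();                     // launch the k-th candidate kernel
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);

        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);  // elapsed milliseconds between the two events
        if (ms / nrepeats < best_ms) {
            best_ms = ms / nrepeats;
            best_idx = k;
        }
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return best_idx;
}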
70222ae22998222a6c4a23eef0a5b705bdd7c71d.cu
/** * (C) Copyright 2020 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "cuda_math_util.h" #include "pulsed_weight_updater.h" #include <cub/cub.cuh> #include <algorithm> #include <chrono> #include <cmath> #include <iostream> #include <memory> #include <random> #include "io_iterator.h" #include "pwu_kernel_parameter.h" #include "rpucuda_pulsed_device.h" namespace RPU { /****************************************************************************************************************/ /* PULSEDWEIGHTUPDATER */ /******************************************************************************************************************/ template <typename T> PulsedWeightUpdater<T>::PulsedWeightUpdater(CudaContext *c, int x_size, int d_size) : context_{c}, x_size_{x_size}, d_size_{d_size} { blm_ = make_unique<BitLineMaker<T>>(c, x_size, d_size); up_context_ = nullptr; is_async_update_ = false; }; template <typename T> pwukpvec_t<T> PulsedWeightUpdater<T>::getValidUpdateKernels( PulsedRPUDeviceCudaBase<T> *rpucuda_device, int m_batch, const PulsedUpdateMetaParameter<T> &up) { pwukpvec_t<T> v; for (int use_bo64 : {1, 0}) { // omit 2 (ie bo64 translation) for (int out_trans : {true, false}) { pwukpvec_t<T> v2 = rpucuda_device->getUpdateKernels(m_batch, up.getNK32Default(), use_bo64, out_trans, up); for (int i = 0; i < v2.size(); i++) { if (v2[i]->isValid()) { v.push_back(v2[i]); } } } if (v.size() > 0 && (m_batch >= 1000)) { break; // prefer bo64 for large batch if possible } } return v; } template <typename T> void PulsedWeightUpdater<T>::makeUpdateAsync() { if (!is_async_update_) { is_async_update_ = true; up_context_ = make_unique<CudaContext>(context_->getGPUId()); } } template <typename T> void PulsedWeightUpdater<T>::waitForUpdateCalculations() { if (is_async_update_) { // use the up_context event for it because context_ might be shared context_->recordWaitEvent(up_context_->getStream(), up_context_->getEvent()); } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::executeUpdate( pwukp_t<T> kpars, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { blm_->makeCounts( x_in, d_in, up, rpucuda_device->getDwMin(), lr, m_batch, x_trans_in, d_trans_in, kpars->getOutTrans(), kpars->getUseBo64()); CudaContext *c = context_; if (is_async_update_) { up_context_->recordWaitEvent(context_->getStream(), context_->getEvent()); c = &*up_context_; } rpucuda_device->runUpdateKernel( kpars, c, dev_weights, m_batch, &*blm_, up, c->getRandomStates(kpars->getNStates())); } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::tuneUpdate( pwukp_t<T> &opt_kernel_pars, pwukpvec_t<T> &v, XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, PulsedRPUDeviceCudaBase<T> *rpucuda_device, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans_in, const bool d_trans_in) { bool is_async_update = 
is_async_update_; is_async_update_ = false; CUDA_TIMING_INIT; int nrepeats = 3; CudaArray<T> dev_tmp_weights(context_, x_size_ * d_size_); auto *tmp_device = rpucuda_device->clone(); PulsedUpdateMetaParameter<T> up_tuning(up); up_tuning._currently_tuning = true; dev_tmp_weights.assignFromDevice(dev_weights); context_->synchronizeDevice(); // maybe other streams exist. T min_timing = FLT_MAX; int min_i = 0; for (int k = 0; k < v.size(); k++) { CUDA_TIMING_START(*context_); for (int i = 0; i < nrepeats; i++) { this->executeUpdate( v[k], x_in, d_in, dev_tmp_weights.getData(), tmp_device, up_tuning, lr, m_batch, x_trans_in, d_trans_in); } CUDA_TIMING_STOP_NO_OUTPUT(*context_); v[k]->timing = milliseconds / nrepeats; if (v[k]->timing < min_timing) { min_timing = v[k]->timing; min_i = k; } } CUDA_TIMING_DESTROY; is_async_update_ = is_async_update; opt_kernel_pars = v[min_i]; delete tmp_device; DEBUG_OUT( "UpdateTuner: Using " << opt_kernel_pars->getName() << " for PWU [" << opt_kernel_pars->timing << "].\n\n"); DEBUG_CALL(opt_kernel_pars->print()); } template <typename T> template <typename InputIteratorT> const T *PulsedWeightUpdater<T>::copyIterator2Buffer( InputIteratorT vec, std::shared_ptr<CudaArray<T>> &buffer, int size) { if ((buffer == nullptr) || (buffer->getSize() < size)) { buffer = std::shared_ptr<CudaArray<T>>(new CudaArray<T>(context_, size)); } RPU::math::copyWithIterator(context_, buffer->getData(), vec, size); return buffer->getDataConst(); } template <> template <> const float *PulsedWeightUpdater<float>::copyIterator2Buffer( const float *vec, std::shared_ptr<CudaArray<float>> &buffer, int size) { return vec; } #ifdef RPU_USE_DOUBLE template <> template <> const double *PulsedWeightUpdater<double>::copyIterator2Buffer( const double *vec, std::shared_ptr<CudaArray<double>> &buffer, int size) { return vec; } #endif template <typename T> void PulsedWeightUpdater<T>::setSharedBuffer( int m_batch, std::shared_ptr<CudaArray<T>> x_buffer, std::shared_ptr<CudaArray<T>> d_buffer) { if (x_buffer) { dev_fpx_buffer_ = x_buffer; if (dev_fpx_buffer_->getSize() < m_batch * x_size_) { RPU_FATAL("X batch buffer size too small."); } } if (d_buffer) { dev_fpd_buffer_ = d_buffer; if (dev_fpd_buffer_->getSize() < m_batch * d_size_) { RPU_FATAL("D batch buffer size too small."); } } } template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::doFPupdate( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, const T lr, const int m_batch, const bool x_trans, const bool d_trans, const T beta) { const T *x_out = copyIterator2Buffer(x_in, dev_fpx_buffer_, x_size_ * m_batch); const T *d_out = copyIterator2Buffer(d_in, dev_fpd_buffer_, d_size_ * m_batch); if (m_batch == 1 && beta == 1.0) { RPU::math::ger<T>(context_, d_size_, x_size_, -lr, d_out, 1, x_out, 1, dev_weights, d_size_); } else { RPU::math::gemm<T>( context_, d_trans, !x_trans, d_size_, // M x_size_, // N m_batch, // K -lr, d_out, d_trans ? m_batch : d_size_, x_out, x_trans ? 
m_batch : x_size_, beta, dev_weights, d_size_); } } template <typename T> bool PulsedWeightUpdater<T>::checkForFPUpdate( AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up) { return (up.pulse_type == PulseType::None) || (rpucuda_device_in == nullptr) || !rpucuda_device_in->isPulsedDevice() || (rpucuda_device_in->implements() == DeviceUpdateType::FloatingPoint); } #define FORCE_TUNING_THRES 0 template <typename T> template <typename XInputIteratorT, typename DInputIteratorT> void PulsedWeightUpdater<T>::update( XInputIteratorT x_in, DInputIteratorT d_in, T *dev_weights, AbstractRPUDeviceCuda<T> *rpucuda_device_in, const PulsedUpdateMetaParameter<T> &up, const T lr, const int m_batch, const bool x_trans, const bool d_trans) { // FP update if no device is given if (checkForFPUpdate(rpucuda_device_in, up) || (up.pulse_type == PulseType::NoneWithDevice)) { doFPupdate(x_in, d_in, dev_weights, lr, m_batch, x_trans, d_trans); if (up.pulse_type == PulseType::NoneWithDevice) { // apply bounds rpucuda_device_in->clipWeights(dev_weights, -1.0); } return; } // safe because of isPulsedDevice PulsedRPUDeviceCudaBase<T> *rpucuda_device = static_cast<PulsedRPUDeviceCudaBase<T> *>(rpucuda_device_in); bool force_tuning = false; // check need for init (or re-init) DeviceUpdateType update_type = rpucuda_device->implements(); if (update_type != update_type_) //|| (!blm_->checkBuffer(m_batch,BL))) { // we do not check for change in x_size/d_size, but they are assumed to be constant as well! force_tuning = true; update_type_ = update_type; // init buffers update_count_ = 0; // init kernels valid_kernels_ = getValidUpdateKernels(rpucuda_device, m_batch, up); if (valid_kernels_.size() == 0) { RPU_FATAL("Cannot find valid update kernels"); } kernel_pars_ = valid_kernels_[0]; // this will be modified if tuned if (up._debug_kernel_index >= 0) { // set default for debugging // just get a valid kpars (will be overwritten if tuning is used below) force_tuning = false; int kidx = up._debug_kernel_index; if (up._debug_kernel_index >= valid_kernels_.size()) { std::cout << "DEBUG WARNING: kernel index out of range " << valid_kernels_.size() << std::endl; kidx = 0; } kernel_pars_ = valid_kernels_[kidx]; if (kernel_pars_->getUseBo64() == 1) { std::cout << "DEBUG WARNING: cannot test BO64 direct. 
Set to translate " << std::endl; kernel_pars_->forceBo64Translate(); } if (kidx == 0) { kernel_pars_->force32(); // debug hack: might break kernel in the worst case kernel_pars_->forceNonTrans(); // debug hack: might break kernel in the worst case std::cout << "DEBUG WARNING: Kernel index 0: FORCED 32 and non-trans" << std::endl; } std::cout << "Selected kernel index " << kidx << " out of " << valid_kernels_.size() << std::endl; kernel_pars_->print(); } } if (update_count_ < FORCE_TUNING_THRES) { // only once again update_count_ += 1; force_tuning = force_tuning || (update_count_ == FORCE_TUNING_THRES); } // tune if requested if (force_tuning) { this->tuneUpdate( kernel_pars_, valid_kernels_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } // do update this->executeUpdate( kernel_pars_, x_in, d_in, dev_weights, rpucuda_device, up, lr, m_batch, x_trans, d_trans); } #define RPU_PWU_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \ template void PulsedWeightUpdater<NUM_T>::update( \ XITERT, DITERT, NUM_T *, AbstractRPUDeviceCuda<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::doFPupdate( \ XITERT, DITERT, NUM_T *, const NUM_T, const int, const bool, const bool, const NUM_T); \ template void PulsedWeightUpdater<NUM_T>::tuneUpdate( \ pwukp_t<NUM_T> &, pwukpvec_t<NUM_T> &, XITERT, DITERT, NUM_T *, \ PulsedRPUDeviceCudaBase<NUM_T> *, const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, \ const int, const bool, const bool); \ template void PulsedWeightUpdater<NUM_T>::executeUpdate( \ pwukp_t<NUM_T>, XITERT, DITERT, NUM_T *, PulsedRPUDeviceCudaBase<NUM_T> *, \ const PulsedUpdateMetaParameter<NUM_T> &, const NUM_T, const int, const bool, const bool); #define TRANSFLOAT(TRANS) TRANS, float template class PulsedWeightUpdater<float>; RPU_PWU_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *); RPU_PWU_ITER_TEMPLATE(float, const float *, const float *); RPU_PWU_ITER_TEMPLATE( float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>); RPU_PWU_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>); #undef TRANSFLOAT #ifdef RPU_USE_DOUBLE #define TRANSDOUBLE(TRANS) TRANS, double template class PulsedWeightUpdater<double>; RPU_PWU_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *); RPU_PWU_ITER_TEMPLATE(double, const double *, const double *); RPU_PWU_ITER_TEMPLATE( double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>); RPU_PWU_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>); #undef TRANSDOUBLE #endif #undef RPU_PWU_ITER_TEMPLATE } // namespace RPU
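// For a single sample, doFPupdate above reduces to the rank-1 outer-product update
// W <- W - lr * d * x^T. A minimal sketch of that call with plain cuBLAS; the function name and
// raw device pointers are illustrative, the real code routes through RPU::math::ger, and the
// batched path (m_batch > 1) becomes the corresponding GEMM with alpha = -lr.
#include <cublas_v2.h>

// W is d_size x x_size, stored column-major on the device with leading dimension d_size.
void fp_rank1_update(cublasHandle_t handle,
                     float* d_W, const float* d_x, const float* d_d,
                     int x_size, int d_size, float lr)
{
    const float alpha = -lr;   // gradient step: subtract lr times the outer product
    cublasSger(handle,
               d_size, x_size, // m, n
               &alpha,
               d_d, 1,         // column vector of length d_size
               d_x, 1,         // row vector of length x_size
               d_W, d_size);   // leading dimension = number of rows
}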
f09a19efbd287e72e48c12ae30a7b15df4f30fb6.hip
// !!! This is a file automatically generated by hipify!!! // // Mex wrapper to CUSPARSE sort for CSR format (csrsort). // // Inspired by cusparse samples (conugateGradient) and Matlab gcsparse. // http://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrmv // http://www.mathworks.com/matlabcentral/fileexchange/44423-gpu-sparse--accumarray--non-uniform-grid // // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> /* Using updated (v2) interfaces to cublas */ #include <hip/hip_runtime.h> #include <hipsparse.h> #include <rocblas.h> // MATLAB related #include "mex.h" #include "gpu/mxGPUArray.h" #include "mxShowCriticalErrorMessage.c" // Input Arguments #define ROW_CSR prhs[0] #define COL prhs[1] #define VAL prhs[2] #define NROWS prhs[3] #define NCOLS prhs[4] // Output Arguments #define COL_SORT plhs[0] #define VAL_SORT plhs[1] void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]) { // Checks if (nlhs > 2) mxShowCriticalErrorMessage("wrong number of output arguments",nlhs); if (nrhs != 5) mxShowCriticalErrorMessage("wrong number of input arguments",nrhs); // Initialize the MathWorks GPU API mxInitGPU(); // Create Matlab pointers on the GPU mxGPUArray const *row_csr = mxGPUCreateFromMxArray(ROW_CSR); mxGPUArray const *col = mxGPUCreateFromMxArray(COL); mxGPUArray const *val = mxGPUCreateFromMxArray(VAL); // Checks - note vectors must be in CSR format int nnz = mxGPUGetNumberOfElements(val); if (mxGPUGetNumberOfElements(col) != nnz) mxShowCriticalErrorMessage("COL and VAL argument length mismatch"); if (!mxIsScalar(NROWS)) mxShowCriticalErrorMessage("NROWS argument must be a scalar"); if (!mxIsScalar(NCOLS)) mxShowCriticalErrorMessage("NCOLS argument must be a scalar"); int ncols = (int)mxGetScalar(NCOLS); int nrows = (int)mxGetScalar(NROWS); if (mxGPUGetNumberOfElements(row_csr) != nrows+1) mxShowCriticalErrorMessage("ROW_CSR argument wrong size"); if (mxGPUGetClassID(row_csr) != mxINT32_CLASS) mxShowCriticalErrorMessage("ROW_CSR argument is not int32"); if (mxGPUGetClassID(col) != mxINT32_CLASS) mxShowCriticalErrorMessage("COL argument is not int32"); if (mxGPUGetClassID(val) != mxSINGLE_CLASS) mxShowCriticalErrorMessage("VAL argument is not single"); // Create space for output vectors const mwSize ndim = 1; mwSize dims[ndim]; dims[0] = nnz; mxGPUArray *col_sort = mxGPUCreateGPUArray(ndim, dims, mxINT32_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); if (col_sort==NULL) mxShowCriticalErrorMessage("mxGPUCreateGPUArray failed"); mxComplexity ccx = mxGPUGetComplexity(val); mxGPUArray *val_sort = mxGPUCreateGPUArray(ndim, dims, mxSINGLE_CLASS, ccx, MX_GPU_INITIALIZE_VALUES); if (val_sort==NULL) mxShowCriticalErrorMessage("mxGPUCreateGPUArray failed"); // Get handle to the CUBLAS context hipblasHandle_t cublasHandle = 0; hipblasStatus_t cublasStatus; cublasStatus = hipblasCreate(&cublasHandle); if (cublasStatus != HIPBLAS_STATUS_SUCCESS) mxShowCriticalErrorMessage(cublasStatus); // Get handle to the CUSPARSE context hipError_t cudaStatus; hipsparseStatus_t cusparseStatus; hipsparseHandle_t cusparseHandle = 0; cusparseStatus = hipsparseCreate(&cusparseHandle); if (cusparseStatus != HIPSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage(cusparseStatus); hipsparseMatDescr_t descr = 0; cusparseStatus = hipsparseCreateMatDescr(&descr); if (cusparseStatus != HIPSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage(cusparseStatus); hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ONE); // Convert 
from matlab pointers to native pointers const int * const d_row_csr = (int*)mxGPUGetDataReadOnly(row_csr); const int * const d_col = (int*)mxGPUGetDataReadOnly(col); int *d_col_sort = (int*)mxGPUGetData(col_sort); // Now we can access the arrays, we can do some checks int base; hipMemcpy(&base, d_row_csr, sizeof(int), hipMemcpyDeviceToHost); if (base != HIPSPARSE_INDEX_BASE_ONE) mxShowCriticalErrorMessage("A_ROW_CSR not using 1-based indexing"); int nnz_check; hipMemcpy(&nnz_check, d_row_csr+nrows, sizeof(int), hipMemcpyDeviceToHost); nnz_check -= HIPSPARSE_INDEX_BASE_ONE; if (nnz_check != nnz) mxShowCriticalErrorMessage("ROW_CSR argument last element != nnz",nnz_check); // Since sort is in-place, copy the read-only vectors to read-write ones cudaStatus = hipMemcpy((void *)d_col_sort, d_col, nnz*sizeof(int), hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) mxShowCriticalErrorMessage("Operation hipMemcpy failed"); if (ccx == mxREAL) { const float * const d_val = (float*)mxGPUGetDataReadOnly(val); float *d_val_sort = (float*)mxGPUGetData(val_sort); cudaStatus = hipMemcpy((void *)d_val_sort, d_val, nnz*sizeof(float), hipMemcpyDeviceToDevice); } else { const cuFloatComplex * const d_val = (cuFloatComplex*)mxGPUGetDataReadOnly(val); cuFloatComplex *d_val_sort = (cuFloatComplex*)mxGPUGetData(val_sort); cudaStatus = hipMemcpy((void *)d_val_sort, d_val, nnz*sizeof(cuFloatComplex), hipMemcpyDeviceToDevice); } if (cudaStatus != hipSuccess) mxShowCriticalErrorMessage("Operation hipMemcpy failed",cudaStatus); // Sort by rows int *P = NULL; void *pBuffer = NULL; size_t pBufferSizeInBytes = 0; if (nnz > 0) { // step 1: allocate buffer cusparseStatus = hipsparseXcsrsort_bufferSizeExt(cusparseHandle, nrows, ncols, nnz, d_row_csr, d_col, &pBufferSizeInBytes); if (cusparseStatus != HIPSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation hipsparseXcoosort_bufferSizeExt failed",cusparseStatus); cudaStatus = hipMalloc( &pBuffer, sizeof(char)*pBufferSizeInBytes); if (cudaStatus != hipSuccess) mxShowCriticalErrorMessage("Operation hipMalloc failed",cudaStatus); // step 2: setup permutation vector P to identity cudaStatus = hipMalloc( &P, sizeof(int)*nnz); if (cudaStatus != hipSuccess) mxShowCriticalErrorMessage("Operation hipMalloc failed",cudaStatus); cusparseStatus = hipsparseCreateIdentityPermutation(cusparseHandle, nnz, P); if (cusparseStatus != HIPSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation hipsparseCreateIdentityPermutation failed",cusparseStatus); // step 3: sort COO format by Row cusparseStatus = hipsparseXcsrsort(cusparseHandle, nrows, ncols, nnz, descr, d_row_csr, d_col_sort, P, pBuffer); if (cusparseStatus != HIPSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation hipsparseXcsrsort failed",cusparseStatus); // step 4: gather sorted cooVals if (ccx == mxREAL) { float *d_val = (float*)mxGPUGetDataReadOnly(val); float *d_val_sort = (float*)mxGPUGetData(val_sort); #if CUDART_VERSION >= 11000 hipsparseHandle_t handle = NULL; hipsparseDnVecDescr_t vec_values; hipsparseSpVecDescr_t vec_permutation; hipsparseCreate(&handle); hipsparseCreateDnVec(&vec_values, nnz, d_val, HIP_R_32F); hipsparseCreateSpVec(&vec_permutation, nnz, nnz, P, d_val_sort, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F); // MUST USE BASE_ZERO cusparseStatus = hipsparseGather(handle, vec_values, vec_permutation); hipsparseDestroyDnVec(vec_values); hipsparseDestroySpVec(vec_permutation); hipsparseDestroy(handle); #else cusparseStatus = hipsparseSgthr(cusparseHandle, nnz, d_val, d_val_sort, 
P, HIPSPARSE_INDEX_BASE_ZERO); // MUST USE BASE_ZERO #endif } else { cuFloatComplex *d_val = (cuFloatComplex*)mxGPUGetDataReadOnly(val); cuFloatComplex *d_val_sort = (cuFloatComplex*)mxGPUGetData(val_sort); #if CUDART_VERSION >= 11000 hipsparseHandle_t handle = NULL; hipsparseDnVecDescr_t vec_values; hipsparseSpVecDescr_t vec_permutation; hipsparseCreate(&handle); hipsparseCreateDnVec(&vec_values, nnz, d_val, HIP_C_32F); hipsparseCreateSpVec(&vec_permutation, nnz, nnz, P, d_val_sort, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, HIP_C_32F); // MUST USE BASE_ZERO cusparseStatus = hipsparseGather(handle, vec_values, vec_permutation); hipsparseDestroyDnVec(vec_values); hipsparseDestroySpVec(vec_permutation); hipsparseDestroy(handle); #else cusparseStatus = hipsparseCgthr(cusparseHandle, nnz, d_val, d_val_sort, P, HIPSPARSE_INDEX_BASE_ZERO); // MUST USE BASE_ZERO #endif } if (cusparseStatus != HIPSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation hipsparseSgthr or hipsparseCgthr failed",cusparseStatus); } // Return result COL_SORT = mxGPUCreateMxArrayOnGPU(col_sort); VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sort); // Make sure operations are finished before deleting //hipDeviceSynchronize(); // Clean up hipsparseDestroyMatDescr(descr); hipsparseDestroy(cusparseHandle); hipblasDestroy(cublasHandle); mxGPUDestroyGPUArray(row_csr); mxGPUDestroyGPUArray(col); mxGPUDestroyGPUArray(col_sort); mxGPUDestroyGPUArray(val); mxGPUDestroyGPUArray(val_sort); if (pBuffer) hipFree(pBuffer); if (P) hipFree(P); return; }
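// Every cuSPARSE/cuBLAS/CUDA call in the wrapper above is followed by an explicit status check
// that reports through mxShowCriticalErrorMessage. A hedged sketch of the same discipline folded
// into helper macros; CHECK_CUDA and CHECK_CUSPARSE are illustrative names, not used by the file.
#include <cuda_runtime.h>
#include <cusparse.h>
#include <cstdio>
#include <cstdlib>

#define CHECK_CUDA(call)                                                                 \
    do {                                                                                 \
        cudaError_t err__ = (call);                                                      \
        if (err__ != cudaSuccess) {                                                      \
            fprintf(stderr, "CUDA error %d at %s:%d\n", (int)err__, __FILE__, __LINE__); \
            exit(EXIT_FAILURE);                                                          \
        }                                                                                \
    } while (0)

#define CHECK_CUSPARSE(call)                                                                 \
    do {                                                                                     \
        cusparseStatus_t st__ = (call);                                                      \
        if (st__ != CUSPARSE_STATUS_SUCCESS) {                                               \
            fprintf(stderr, "cuSPARSE error %d at %s:%d\n", (int)st__, __FILE__, __LINE__);  \
            exit(EXIT_FAILURE);                                                              \
        }                                                                                    \
    } while (0)

// Usage: CHECK_CUSPARSE(cusparseCreate(&handle)); CHECK_CUDA(cudaMalloc(&buf, bytes));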
f09a19efbd287e72e48c12ae30a7b15df4f30fb6.cu
// // Mex wrapper to CUSPARSE sort for CSR format (csrsort). // // Inspired by cusparse samples (conugateGradient) and Matlab gcsparse. // http://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrmv // http://www.mathworks.com/matlabcentral/fileexchange/44423-gpu-sparse--accumarray--non-uniform-grid // // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> /* Using updated (v2) interfaces to cublas */ #include <cuda_runtime.h> #include <cusparse.h> #include <cublas_v2.h> // MATLAB related #include "mex.h" #include "gpu/mxGPUArray.h" #include "mxShowCriticalErrorMessage.c" // Input Arguments #define ROW_CSR prhs[0] #define COL prhs[1] #define VAL prhs[2] #define NROWS prhs[3] #define NCOLS prhs[4] // Output Arguments #define COL_SORT plhs[0] #define VAL_SORT plhs[1] void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]) { // Checks if (nlhs > 2) mxShowCriticalErrorMessage("wrong number of output arguments",nlhs); if (nrhs != 5) mxShowCriticalErrorMessage("wrong number of input arguments",nrhs); // Initialize the MathWorks GPU API mxInitGPU(); // Create Matlab pointers on the GPU mxGPUArray const *row_csr = mxGPUCreateFromMxArray(ROW_CSR); mxGPUArray const *col = mxGPUCreateFromMxArray(COL); mxGPUArray const *val = mxGPUCreateFromMxArray(VAL); // Checks - note vectors must be in CSR format int nnz = mxGPUGetNumberOfElements(val); if (mxGPUGetNumberOfElements(col) != nnz) mxShowCriticalErrorMessage("COL and VAL argument length mismatch"); if (!mxIsScalar(NROWS)) mxShowCriticalErrorMessage("NROWS argument must be a scalar"); if (!mxIsScalar(NCOLS)) mxShowCriticalErrorMessage("NCOLS argument must be a scalar"); int ncols = (int)mxGetScalar(NCOLS); int nrows = (int)mxGetScalar(NROWS); if (mxGPUGetNumberOfElements(row_csr) != nrows+1) mxShowCriticalErrorMessage("ROW_CSR argument wrong size"); if (mxGPUGetClassID(row_csr) != mxINT32_CLASS) mxShowCriticalErrorMessage("ROW_CSR argument is not int32"); if (mxGPUGetClassID(col) != mxINT32_CLASS) mxShowCriticalErrorMessage("COL argument is not int32"); if (mxGPUGetClassID(val) != mxSINGLE_CLASS) mxShowCriticalErrorMessage("VAL argument is not single"); // Create space for output vectors const mwSize ndim = 1; mwSize dims[ndim]; dims[0] = nnz; mxGPUArray *col_sort = mxGPUCreateGPUArray(ndim, dims, mxINT32_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); if (col_sort==NULL) mxShowCriticalErrorMessage("mxGPUCreateGPUArray failed"); mxComplexity ccx = mxGPUGetComplexity(val); mxGPUArray *val_sort = mxGPUCreateGPUArray(ndim, dims, mxSINGLE_CLASS, ccx, MX_GPU_INITIALIZE_VALUES); if (val_sort==NULL) mxShowCriticalErrorMessage("mxGPUCreateGPUArray failed"); // Get handle to the CUBLAS context cublasHandle_t cublasHandle = 0; cublasStatus_t cublasStatus; cublasStatus = cublasCreate(&cublasHandle); if (cublasStatus != CUBLAS_STATUS_SUCCESS) mxShowCriticalErrorMessage(cublasStatus); // Get handle to the CUSPARSE context cudaError_t cudaStatus; cusparseStatus_t cusparseStatus; cusparseHandle_t cusparseHandle = 0; cusparseStatus = cusparseCreate(&cusparseHandle); if (cusparseStatus != CUSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage(cusparseStatus); cusparseMatDescr_t descr = 0; cusparseStatus = cusparseCreateMatDescr(&descr); if (cusparseStatus != CUSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage(cusparseStatus); cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ONE); // Convert from matlab pointers to native pointers const int * const d_row_csr = 
(int*)mxGPUGetDataReadOnly(row_csr); const int * const d_col = (int*)mxGPUGetDataReadOnly(col); int *d_col_sort = (int*)mxGPUGetData(col_sort); // Now we can access the arrays, we can do some checks int base; cudaMemcpy(&base, d_row_csr, sizeof(int), cudaMemcpyDeviceToHost); if (base != CUSPARSE_INDEX_BASE_ONE) mxShowCriticalErrorMessage("A_ROW_CSR not using 1-based indexing"); int nnz_check; cudaMemcpy(&nnz_check, d_row_csr+nrows, sizeof(int), cudaMemcpyDeviceToHost); nnz_check -= CUSPARSE_INDEX_BASE_ONE; if (nnz_check != nnz) mxShowCriticalErrorMessage("ROW_CSR argument last element != nnz",nnz_check); // Since sort is in-place, copy the read-only vectors to read-write ones cudaStatus = cudaMemcpy((void *)d_col_sort, d_col, nnz*sizeof(int), cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) mxShowCriticalErrorMessage("Operation cudaMemcpy failed"); if (ccx == mxREAL) { const float * const d_val = (float*)mxGPUGetDataReadOnly(val); float *d_val_sort = (float*)mxGPUGetData(val_sort); cudaStatus = cudaMemcpy((void *)d_val_sort, d_val, nnz*sizeof(float), cudaMemcpyDeviceToDevice); } else { const cuFloatComplex * const d_val = (cuFloatComplex*)mxGPUGetDataReadOnly(val); cuFloatComplex *d_val_sort = (cuFloatComplex*)mxGPUGetData(val_sort); cudaStatus = cudaMemcpy((void *)d_val_sort, d_val, nnz*sizeof(cuFloatComplex), cudaMemcpyDeviceToDevice); } if (cudaStatus != cudaSuccess) mxShowCriticalErrorMessage("Operation cudaMemcpy failed",cudaStatus); // Sort by rows int *P = NULL; void *pBuffer = NULL; size_t pBufferSizeInBytes = 0; if (nnz > 0) { // step 1: allocate buffer cusparseStatus = cusparseXcsrsort_bufferSizeExt(cusparseHandle, nrows, ncols, nnz, d_row_csr, d_col, &pBufferSizeInBytes); if (cusparseStatus != CUSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation cusparseXcoosort_bufferSizeExt failed",cusparseStatus); cudaStatus = cudaMalloc( &pBuffer, sizeof(char)*pBufferSizeInBytes); if (cudaStatus != cudaSuccess) mxShowCriticalErrorMessage("Operation cudaMalloc failed",cudaStatus); // step 2: setup permutation vector P to identity cudaStatus = cudaMalloc( &P, sizeof(int)*nnz); if (cudaStatus != cudaSuccess) mxShowCriticalErrorMessage("Operation cudaMalloc failed",cudaStatus); cusparseStatus = cusparseCreateIdentityPermutation(cusparseHandle, nnz, P); if (cusparseStatus != CUSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation cusparseCreateIdentityPermutation failed",cusparseStatus); // step 3: sort COO format by Row cusparseStatus = cusparseXcsrsort(cusparseHandle, nrows, ncols, nnz, descr, d_row_csr, d_col_sort, P, pBuffer); if (cusparseStatus != CUSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation cusparseXcsrsort failed",cusparseStatus); // step 4: gather sorted cooVals if (ccx == mxREAL) { float *d_val = (float*)mxGPUGetDataReadOnly(val); float *d_val_sort = (float*)mxGPUGetData(val_sort); #if CUDART_VERSION >= 11000 cusparseHandle_t handle = NULL; cusparseDnVecDescr_t vec_values; cusparseSpVecDescr_t vec_permutation; cusparseCreate(&handle); cusparseCreateDnVec(&vec_values, nnz, d_val, CUDA_R_32F); cusparseCreateSpVec(&vec_permutation, nnz, nnz, P, d_val_sort, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F); // MUST USE BASE_ZERO cusparseStatus = cusparseGather(handle, vec_values, vec_permutation); cusparseDestroyDnVec(vec_values); cusparseDestroySpVec(vec_permutation); cusparseDestroy(handle); #else cusparseStatus = cusparseSgthr(cusparseHandle, nnz, d_val, d_val_sort, P, CUSPARSE_INDEX_BASE_ZERO); // MUST USE BASE_ZERO #endif } else { 
cuFloatComplex *d_val = (cuFloatComplex*)mxGPUGetDataReadOnly(val); cuFloatComplex *d_val_sort = (cuFloatComplex*)mxGPUGetData(val_sort); #if CUDART_VERSION >= 11000 cusparseHandle_t handle = NULL; cusparseDnVecDescr_t vec_values; cusparseSpVecDescr_t vec_permutation; cusparseCreate(&handle); cusparseCreateDnVec(&vec_values, nnz, d_val, CUDA_C_32F); cusparseCreateSpVec(&vec_permutation, nnz, nnz, P, d_val_sort, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, CUDA_C_32F); // MUST USE BASE_ZERO cusparseStatus = cusparseGather(handle, vec_values, vec_permutation); cusparseDestroyDnVec(vec_values); cusparseDestroySpVec(vec_permutation); cusparseDestroy(handle); #else cusparseStatus = cusparseCgthr(cusparseHandle, nnz, d_val, d_val_sort, P, CUSPARSE_INDEX_BASE_ZERO); // MUST USE BASE_ZERO #endif } if (cusparseStatus != CUSPARSE_STATUS_SUCCESS) mxShowCriticalErrorMessage("Operation cusparseSgthr or cusparseCgthr failed",cusparseStatus); } // Return result COL_SORT = mxGPUCreateMxArrayOnGPU(col_sort); VAL_SORT = mxGPUCreateMxArrayOnGPU(val_sort); // Make sure operations are finished before deleting //cudaDeviceSynchronize(); // Clean up cusparseDestroyMatDescr(descr); cusparseDestroy(cusparseHandle); cublasDestroy(cublasHandle); mxGPUDestroyGPUArray(row_csr); mxGPUDestroyGPUArray(col); mxGPUDestroyGPUArray(col_sort); mxGPUDestroyGPUArray(val); mxGPUDestroyGPUArray(val_sort); if (pBuffer) cudaFree(pBuffer); if (P) cudaFree(P); return; }
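// Stripped of the MEX plumbing, the sort in the pair above is the standard four-step cuSPARSE
// csrsort recipe: size the scratch buffer, build an identity permutation, sort the column indices
// in place, then gather the values through the permutation. A hedged sketch for real-valued data
// on the pre-CUDA-11 path (cusparseSgthr), mirroring the calls used above:
#include <cuda_runtime.h>
#include <cusparse.h>

void csrsort_columns(cusparseHandle_t handle, cusparseMatDescr_t descr,
                     int nrows, int ncols, int nnz,
                     const int* d_csrRowPtr, int* d_csrColInd,
                     const float* d_valIn, float* d_valSorted)
{
    // Step 1: how much scratch space does the sort need?
    size_t bufferSize = 0;
    cusparseXcsrsort_bufferSizeExt(handle, nrows, ncols, nnz,
                                   d_csrRowPtr, d_csrColInd, &bufferSize);
    void* d_buffer = nullptr;
    cudaMalloc(&d_buffer, bufferSize);

    // Step 2: permutation vector starts as the identity.
    int* d_P = nullptr;
    cudaMalloc(&d_P, sizeof(int) * nnz);
    cusparseCreateIdentityPermutation(handle, nnz, d_P);

    // Step 3: sort column indices within each row; d_P records where each value moved.
    cusparseXcsrsort(handle, nrows, ncols, nnz, descr,
                     d_csrRowPtr, d_csrColInd, d_P, d_buffer);

    // Step 4: apply the permutation to the values (cusparseGather on CUDA 11+).
    cusparseSgthr(handle, nnz, d_valIn, d_valSorted, d_P, CUSPARSE_INDEX_BASE_ZERO);

    cudaFree(d_P);
    cudaFree(d_buffer);
}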
2c69ddec5a1970b61b572e21fe83c122fa40e704.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdafx.h" #include "CudaHelper.h" #include "CudaMathHelper.h" namespace mf { texture<float4, hipTextureType1D, hipReadModeElementType> vectorMangitudeCtfTex; // 1D texture for color transfer function. texture<float4, hipTextureType3D, hipReadModeElementType> vectorFieldTex; // 3D texture for storing of volumetric vector field. hipArray* d_volumeArray = nullptr; hipArray* d_vectorMangitudeCtfData = nullptr; float vectorMangitudeCtfLength; float maxMangitude; extern "C" void initCuda(const float4* h_volume, hipExtent volumeSize, const std::vector<float4>& vectorMangitudeCtf, float maxVectorMangitude) { { std::cout << "Initializing vector magnitude color transfer function." << std::endl; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>(); checkCudaErrors(hipMallocArray(&d_vectorMangitudeCtfData, &channelDesc, vectorMangitudeCtf.size(), 1)); checkCudaErrors(hipMemcpyToArray(d_vectorMangitudeCtfData, 0, 0, &vectorMangitudeCtf[0], sizeof(float4) * vectorMangitudeCtf.size(), hipMemcpyHostToDevice)); vectorMangitudeCtfTex.normalized = false; vectorMangitudeCtfTex.filterMode = hipFilterModeLinear; vectorMangitudeCtfTex.addressMode[0] = hipAddressModeClamp; checkCudaErrors(hipBindTextureToArray(vectorMangitudeCtfTex, d_vectorMangitudeCtfData, channelDesc)); vectorMangitudeCtfLength = (float)vectorMangitudeCtf.size(); maxMangitude = maxVectorMangitude; } { // allocate 3D array std::cout << "Allocating CUDA 3D array in device." << std::endl; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>(); checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize)); // copy data to 3D array std::cout << "Copying data to device." << std::endl; hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)h_volume, volumeSize.width * sizeof(float4), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors(hipMemcpy3D(&copyParams)); // set texture parameters vectorFieldTex.normalized = false; vectorFieldTex.filterMode = hipFilterModeLinear; // linear interpolation vectorFieldTex.addressMode[0] = hipAddressModeClamp; vectorFieldTex.addressMode[1] = hipAddressModeClamp; vectorFieldTex.addressMode[2] = hipAddressModeClamp; // bind array to 3D texture std::cout << "Binding 3D texture." << std::endl; checkCudaErrors(hipBindTextureToArray(vectorFieldTex, d_volumeArray, channelDesc)); std::cout << "Volume data successfully copied to device." 
<< std::endl; } checkCudaErrors(hipDeviceSynchronize()); } __inline__ __device__ float3 findPerpendicular(float3 v) { /*float ax = abs(v.x); float ay = abs(v.y); float az = abs(v.z); if (ax >= az && ay >= az) { // ax, ay are dominant return make_float3(-v.y, v.x, 0.0f); } else if (ax >= ay && az >= ay) { // ax, az are dominant return make_float3(-v.z, 0.0f, v.x); } else { // ay, az are dominant return make_float3(0.0f, -v.z, v.y); }*/ return make_float3(-v.y, v.x, 0.0f); } __device__ double4 eulerIntegrate(double3 pos, double dt, float3 volumeCoordSpaceMult) { float4 v = tex3D(vectorFieldTex, (float)(pos.x * volumeCoordSpaceMult.x), (float)(pos.y * volumeCoordSpaceMult.y), (float)(pos.z * volumeCoordSpaceMult.z)); return make_double4(dt * v.x, dt * v.y, dt * v.z, v.w); } __device__ double4 rk4Integrate(double3 pos, double dt, float3 volumeCoordSpaceMult) { float4 k1 = tex3D(vectorFieldTex, (float)(pos.x * volumeCoordSpaceMult.x), (float)(pos.y * volumeCoordSpaceMult.y), (float)(pos.z * volumeCoordSpaceMult.z)); double dtHalf = dt * 0.5; float4 k2 = tex3D(vectorFieldTex, (float)((pos.x + dtHalf * k1.x) * volumeCoordSpaceMult.x), (float)((pos.y + dtHalf * k1.y) * volumeCoordSpaceMult.y), (float)((pos.z + dtHalf * k1.z) * volumeCoordSpaceMult.z)); float4 k3 = tex3D(vectorFieldTex, (float)((pos.x + dtHalf * k2.x) * volumeCoordSpaceMult.x), (float)((pos.y + dtHalf * k2.y) * volumeCoordSpaceMult.y), (float)((pos.z + dtHalf * k2.z) * volumeCoordSpaceMult.z)); float4 k4 = tex3D(vectorFieldTex, (float)((pos.x + dt * k3.x) * volumeCoordSpaceMult.x), (float)((pos.y + dt * k3.y) * volumeCoordSpaceMult.y), (float)((pos.z + dt * k3.z) * volumeCoordSpaceMult.z)); double dtSixth = dt / 6.0; return make_double4( dtSixth * ((double)k1.x + 2.0 * ((double)k2.x + (double)k3.x) + (double)k4.x), dtSixth * ((double)k1.y + 2.0 * ((double)k2.y + (double)k3.y) + (double)k4.y), dtSixth * ((double)k1.z + 2.0 * ((double)k2.z + (double)k3.z) + (double)k4.z), ((double)k1.w + 2.0 * ((double)k2.w + (double)k3.w) + (double)k4.w) / 6.0); } __global__ void computeStreamlinesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, hipExtent volumeSize, float3 volumeCoordSpaceMult, bool useRk4, uint geometrySampling, float mangitudeCtfNormalizeMult, float3* outputPts, uint* outComputedSteps, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (id >= seedsCount) { return; } uint outputPos = id * (maxSteps / geometrySampling + 1); double3 position = make_double3(seeds[id].x, seeds[id].y, seeds[id].z); //printf("[%i] Pos: %f %f %f\n", id, position.x, position.y, position.z); //printf("[%i] World: %i %i %i\n", id, volumeSize.width, volumeSize.height, volumeSize.depth); double3 maxWorld; maxWorld.x = (double)volumeSize.width / volumeCoordSpaceMult.x; maxWorld.y = (double)volumeSize.height / volumeCoordSpaceMult.y; maxWorld.z = (double)volumeSize.depth / volumeCoordSpaceMult.z; //printf("Start pos: %f %f %f\n", position.x, position.y, position.z); outputPts[outputPos].x = (float)position.x; outputPts[outputPos].y = (float)position.y; outputPts[outputPos].z = (float)position.z; ++outputPos; uint geometryStep = geometrySampling; uint step = 1; for (; step < maxSteps; ++step) { if (position.x < 0 || position.y < 0 || position.z < 0 || position.x > maxWorld.x || position.y > maxWorld.y || position.z > maxWorld.z) { //printf("Break at pos: %f %f %f\n", position.x, position.y, position.z); break; } double4 dv = useRk4 ? 
rk4Integrate(position, dt, volumeCoordSpaceMult) : eulerIntegrate(position, dt, volumeCoordSpaceMult); //printf("Vector: %f %f %f\n", dv.x, dv.y, dv.z); position.x += dv.x; position.y += dv.y; position.z += dv.z; --geometryStep; if (geometryStep == 0) { geometryStep = geometrySampling; outputPts[outputPos].x = (float)position.x; outputPts[outputPos].y = (float)position.y; outputPts[outputPos].z = (float)position.z; //printf("New pos: %f %f %f\n", position.x, position.y, position.z); float4 color = tex1D(vectorMangitudeCtfTex, (float)(dv.w * mangitudeCtfNormalizeMult)); //printf("Color (%f -> %f): %f %f %f\n", vector.w, vector.w * mangitudeCtfNormalizeMult, color.x, color.y, color.z); outVertexColors[outputPos - 1].x = color.x; outVertexColors[outputPos - 1].y = color.y; outVertexColors[outputPos - 1].z = color.z; ++outputPos; } } float4 vector = tex3D(vectorFieldTex, (float)(position.x * volumeCoordSpaceMult.x), (float)(position.y * volumeCoordSpaceMult.y), (float)(position.z * volumeCoordSpaceMult.z)); float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); outVertexColors[outputPos - 1].x = color.x; outVertexColors[outputPos - 1].y = color.y; outVertexColors[outputPos - 1].z = color.z; outComputedSteps[id] = step / geometrySampling; } extern "C" void runStreamlinesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, hipExtent volumeSize, float3 volumeCoordSpaceMult, bool useRk4, uint geometrySampling, float3* outputPts, uint* outComputedSteps, float3* outVertexColors) { ushort threadsCount = 32; uint requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; if (requredBlocksCount > 1024) { threadsCount = 256; requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; } assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; hipLaunchKernelGGL(( computeStreamlinesLineKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, seeds, seedsCount, dt, maxSteps, volumeSize, volumeCoordSpaceMult, useRk4, geometrySampling, (vectorMangitudeCtfLength / maxMangitude), outputPts, outComputedSteps, outVertexColors); checkCudaErrors(hipDeviceSynchronize()); } __device__ void createTubeBaseVertices(float3 pos, float3 v, float radius, uint baseIndex, float3 color, float3* outVetrices, float3* outNormals, float3* outColors) { float3 xAxis = normalize(findPerpendicular(v)); float3 yAxis = normalize(cross(v, xAxis)); //printf("vertices %i\n", baseIndex); outNormals[baseIndex] = xAxis; outVetrices[baseIndex] = pos + xAxis * radius; // x * cos(0) + y * sin (0) outColors[baseIndex] = color; ++baseIndex; v = 0.3090f * xAxis + 0.9511f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; // x * cos(72) + y * sin (72) outColors[baseIndex] = color; ++baseIndex; v = -0.8090f * xAxis + 0.5878f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; outColors[baseIndex] = color; ++baseIndex; v = -0.8090f * xAxis - 0.5878f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; // x * cos(216) + y * sin (216) outColors[baseIndex] = color; ++baseIndex; v = 0.3090f * xAxis - 0.9511f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; // x * cos(288) + y * sin (288) outColors[baseIndex] = color; } __device__ void createTubeIndices(uint vertexBaseId, uint baseFaceId, uint3* outFaces) { //printf("v %i, i %i \n", vertexBaseId, baseFaceId); for (uint i = 0; i < 5; ++i) { uint iNext = (i + 1) % 5; outFaces[baseFaceId++] = 
make_uint3(vertexBaseId + i, vertexBaseId + iNext, vertexBaseId - 5 + iNext); outFaces[baseFaceId++] = make_uint3(vertexBaseId + i, vertexBaseId - 5 + i, vertexBaseId - 5 + iNext); } } __global__ void computeStreamtubesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, hipExtent volumeSize, float3 volumeCoordSpaceMult, float radius, bool useRk4, uint geometrySampling, float mangitudeCtfNormalizeMult, float3* outVetrices, uint* outComputedSteps, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (id >= seedsCount) { return; } uint outputPos = id * (maxSteps / geometrySampling + 1) * 5; //printf("id: %i, maxSteps: %i, outPos: %i \n", id, maxSteps, outputPos); double3 position = make_double3(seeds[id].x, seeds[id].y, seeds[id].z); float4 vector = tex3D(vectorFieldTex, (float)(position.x * volumeCoordSpaceMult.x), (float)(position.y * volumeCoordSpaceMult.y), (float)(position.z * volumeCoordSpaceMult.z)); float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); createTubeBaseVertices(make_float3(position.x, position.y, position.z), make_float3(vector.x, vector.y, vector.z), radius, outputPos, make_float3(color.x, color.y, color.z), outVetrices, outVertexNormals, outVertexColors); outputPos += 5; double3 maxWorld; maxWorld.x = (double)volumeSize.width / volumeCoordSpaceMult.x; maxWorld.y = (double)volumeSize.height / volumeCoordSpaceMult.y; maxWorld.z = (double)volumeSize.depth / volumeCoordSpaceMult.z; uint geometryStep = geometrySampling; uint step = 1; for (; step < maxSteps; ++step) { if (position.x < 0 || position.y < 0 || position.z < 0 || position.x > maxWorld.x || position.y > maxWorld.y || position.z > maxWorld.z) { break; } double4 dv = useRk4 ? 
rk4Integrate(position, dt, volumeCoordSpaceMult) : eulerIntegrate(position, dt, volumeCoordSpaceMult); position.x += dv.x; position.y += dv.y; position.z += dv.z; --geometryStep; if (geometryStep == 0) { geometryStep = geometrySampling; color = tex1D(vectorMangitudeCtfTex, (float)(dv.w * mangitudeCtfNormalizeMult)); createTubeBaseVertices(make_float3(position.x, position.y, position.z), make_float3(dv.x, dv.y, dv.z), radius, outputPos, make_float3(color.x, color.y, color.z), outVetrices, outVertexNormals, outVertexColors); createTubeIndices(outputPos, (outputPos - 5 * id - 5) * 2, outFaces); outputPos += 5; } } outComputedSteps[id] = step / geometrySampling; } extern "C" void runStreamtubesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, hipExtent volumeSize, float3 volumeCoordSpaceMult, float tubeRadius, bool useRk4, uint geometrySampling, float3* outVetrices, uint* outComputedSteps, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { ushort threadsCount = 64; uint requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; if (requredBlocksCount > 1024) { threadsCount = 256; requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; } assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; hipLaunchKernelGGL(( computeStreamtubesLineKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, seeds, seedsCount, dt, maxSteps, volumeSize, volumeCoordSpaceMult, tubeRadius, useRk4, geometrySampling, (vectorMangitudeCtfLength / maxMangitude), outVetrices, outComputedSteps, outFaces, outVertexNormals, outVertexColors); checkCudaErrors(hipDeviceSynchronize()); } __global__ void computeGlyphLinesKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float3* outputPts, float mangitudeCtfNormalizeMult, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint totalCount = __umul24(glyphsCount.x, glyphsCount.y); if (id >= totalCount) { return; } uint col = id % glyphsCount.x; uint row = id / glyphsCount.x; float3 position = make_float3(x, col * (worldSize.x / glyphsCount.x), row * (worldSize.y / glyphsCount.y)); float4 vector = tex3D(vectorFieldTex, position.x * volumeCoordSpaceMult.x, position.y * volumeCoordSpaceMult.y, position.z * volumeCoordSpaceMult.z); id *= 2; outputPts[id] = position; outputPts[id + 1] = position + normalize(make_float3(vector.x, vector.y, vector.z)) * glyphLength * vector.w * mangitudeCtfNormalizeMult * 0.5; float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); outVertexColors[id].x = color.x; outVertexColors[id].y = color.y; outVertexColors[id].z = color.z; outVertexColors[id + 1].x = color.x; outVertexColors[id + 1].y = color.y; outVertexColors[id + 1].z = color.z; } extern "C" void runGlyphLinesKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float3* outputPts, float3* outVertexColors) { ushort threadsCount = 256; uint requredBlocksCount = (glyphsCount.x * glyphsCount.y + threadsCount - 1) / threadsCount; assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; hipLaunchKernelGGL(( computeGlyphLinesKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, x, glyphsCount, worldSize, glyphLength, volumeCoordSpaceMult, outputPts, (vectorMangitudeCtfLength / maxMangitude), outVertexColors); checkCudaErrors(hipDeviceSynchronize()); } __global__ void computeGlyphArrowsKernel(float x, uint2 glyphsCount, float2 
worldSize, float glyphLength, float3 volumeCoordSpaceMult, float mangitudeCtfNormalizeMult, float3* outVertices, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint totalCount = __umul24(glyphsCount.x, glyphsCount.y); if (id >= totalCount) { return; } uint col = id % glyphsCount.x; uint row = id / glyphsCount.x; float3 position = make_float3(x, col * (worldSize.x / glyphsCount.x), row * (worldSize.y / glyphsCount.y)); float4 vector = tex3D(vectorFieldTex, position.x * volumeCoordSpaceMult.x, position.y * volumeCoordSpaceMult.y, position.z * volumeCoordSpaceMult.z); float3 forward = normalize(make_float3(vector.x, vector.y, vector.z)); float3 xAxis = normalize(findPerpendicular(forward)); float3 yAxis = normalize(cross(forward, xAxis)); uint faceId = id * 6; uint vertexId = id * 9; outFaces[faceId] = make_uint3(vertexId, vertexId + 1, vertexId + 2); outFaces[faceId + 1] = make_uint3(vertexId, vertexId + 2, vertexId + 3); outFaces[faceId + 2] = make_uint3(vertexId, vertexId + 3, vertexId + 4); outFaces[faceId + 3] = make_uint3(vertexId, vertexId + 4, vertexId + 1); outFaces[faceId + 4] = make_uint3(vertexId + 5, vertexId + 6, vertexId +7); outFaces[faceId + 5] = make_uint3(vertexId + 5, vertexId + 7, vertexId + 8); id *= 9; outVertexNormals[id] = forward; outVertexNormals[id + 1] = xAxis; outVertexNormals[id + 2] = yAxis; outVertexNormals[id + 3] = -xAxis; outVertexNormals[id + 4] = -yAxis; forward *= -1; outVertexNormals[id + 5] = forward; outVertexNormals[id + 6] = forward; outVertexNormals[id + 7] = forward; outVertexNormals[id + 8] = forward; forward *= glyphLength * vector.w * mangitudeCtfNormalizeMult * 0.5; xAxis *= glyphLength * 0.1; yAxis *= glyphLength * 0.1; //printf("Pos: %f %f %f\n", xAxis.x, xAxis.y, xAxis.z); outVertices[id] = position - forward; // forward was multiplied by -1 outVertices[id + 1] = position + xAxis; outVertices[id + 2] = position + yAxis; outVertices[id + 3] = position - xAxis; outVertices[id + 4] = position - yAxis; outVertices[id + 5] = position + xAxis; outVertices[id + 6] = position + yAxis; outVertices[id + 7] = position - xAxis; outVertices[id + 8] = position - yAxis; float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); float3 color3 = make_float3(color.x, color.y, color.z); outVertexColors[id] = color3; outVertexColors[id + 1] = color3; outVertexColors[id + 2] = color3; outVertexColors[id + 3] = color3; outVertexColors[id + 4] = color3; outVertexColors[id + 5] = color3; outVertexColors[id + 6] = color3; outVertexColors[id + 7] = color3; outVertexColors[id + 8] = color3; } extern "C" void runGlyphArrowsKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float3* outVertices, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { ushort threadsCount = 256; uint requredBlocksCount = (glyphsCount.x * glyphsCount.y + threadsCount - 1) / threadsCount; assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; hipLaunchKernelGGL(( computeGlyphArrowsKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, x, glyphsCount, worldSize, glyphLength, volumeCoordSpaceMult, (vectorMangitudeCtfLength / maxMangitude), outVertices, outFaces, outVertexNormals, outVertexColors); checkCudaErrors(hipDeviceSynchronize()); } __global__ void computeStreamSurfaceKernel(uint2* linePairs, uint linePairsCount, float3* lineVertices, uint verticesPerLine, uint* lineLengths, uint3* outFaces, 
uint* outFacesCounts, float3* outNormals) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (id >= linePairsCount) { //printf("%i: too much\n", id); return; } uint2 currPair = linePairs[id]; uint2 lengths = make_uint2(lineLengths[currPair.x], lineLengths[currPair.y]); if (lengths.x < 2 || lengths.y < 2) { outFacesCounts[id] = 0; //printf("%i: too much\n", id); return; } //printf("%i: lines %i, %i; lengths: %i, %i\n", id, currPair.x, currPair.y, lengths.x ,lengths.y); uint line1Offset = currPair.x * verticesPerLine; uint line2Offset = currPair.y * verticesPerLine; //printf("[%i] vpl: %i\n", id, verticesPerLine); float3* line1 = lineVertices + line1Offset; float3* line2 = lineVertices + line2Offset; float3* normals1 = outNormals + line1Offset; float3* normals2 = outNormals + line2Offset; uint maxFaces = verticesPerLine * 2 - 2; uint3* faces = outFaces + id * maxFaces; uint2 currIndex = make_uint2(0, 0); //float totalMaxAllowedLineDist = max(1.0f, 8.0f * max(length(line1[0] - line2[0]), length(line1[0] - line2[1]))); //float maxAllowedLineDist = 2.0f * length(line1[0] - line2[0]);//length(line1[0] - line2[1]) + length(line2[0] - line1[1]); float lastMinDist = length(line1[0] - line2[0]); uint oneConnections = 0; uint twoConnections = 0; uint maxConnections = 8; uint faceId; for (faceId = 0; faceId < maxFaces; ++faceId) { if (currIndex.x + 1 >= lengths.x || currIndex.y + 1 >= lengths.y) { break; } float dist1 = (currIndex.x + 1 < lengths.x) ? length(line1[currIndex.x + 1] - line2[currIndex.y]) : (1.0f / 0.0f); float dist2 = (currIndex.y + 1 < lengths.y) ? length(line1[currIndex.x] - line2[currIndex.y + 1]) : (1.0f / 0.0f); uint newVertexIndex; float3 newVertex; uint2 nextIndex; float minDist; if (dist1 <= dist2) { if (oneConnections > maxConnections) { break; } ++oneConnections; twoConnections = 0; minDist = dist1; newVertexIndex = line1Offset + currIndex.x + 1; newVertex = line1[currIndex.x + 1]; nextIndex = make_uint2(currIndex.x + 1, currIndex.y); } else if (dist2 < dist1) { if (twoConnections > maxConnections) { break; } ++twoConnections; oneConnections = 0; minDist = dist2; newVertexIndex = line2Offset + currIndex.y + 1; newVertex = line2[currIndex.y + 1]; nextIndex = make_uint2(currIndex.x, currIndex.y + 1); } float lenDirect = length(line1[currIndex.x] - line2[currIndex.y]); minDist = min(minDist, lenDirect); //if (/*minDist > maxAllowedLineDist || */minDist > totalMaxAllowedLineDist) { // break; //} float distRatio = minDist / lastMinDist; if (distRatio > 1.5) { //printf("%i: dist ratio %f\n", id, distRatio); break; } //maxAllowedLineDist = (7.0f * maxAllowedLineDist + 2.0f * minDist) / 8.0f; faces[faceId] = make_uint3(line1Offset + currIndex.x, line2Offset + currIndex.y, newVertexIndex); //printf("%i: faceId %i [%i, %i, %i] (dist1: %f, dist2: %f)\n", id, faceId, faces[faceId].x, faces[faceId].y, faces[faceId].z, dist1, dist2); float3 normal = cross(line1[currIndex.x] - line2[currIndex.y], newVertex - line2[currIndex.y]); normal = normalize(normal); normals1[currIndex.x] = normal; normals2[currIndex.y] = normal; currIndex = nextIndex; lastMinDist = (3.0f * lastMinDist + minDist) / 4.0f; } //printf("[%i] faces: %i\n", id, faceId); outFacesCounts[id] = faceId; } extern "C" void runLineStreamSurfaceKernel(uint2* linePairs, uint linePairsCount, float3* lineVertices, uint verticesPerLine, uint* lineLengths, uint3* outFaces, uint* outFacesCounts, float3* outNormals) { ushort threadsCount = 32; uint requredBlocksCount = (linePairsCount + threadsCount - 1) / threadsCount; if 
(requredBlocksCount > 1024) { threadsCount = 256; requredBlocksCount = (linePairsCount + threadsCount - 1) / threadsCount; } assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; hipLaunchKernelGGL(( computeStreamSurfaceKernel), dim3(blocksCount), dim3(threadsCount), 0, 0, linePairs, linePairsCount, lineVertices, verticesPerLine, lineLengths, outFaces, outFacesCounts, outNormals); checkCudaErrors(hipDeviceSynchronize()); } }
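The tube mesh produced by computeStreamtubesLineKernel above follows a fixed layout: each seed owns a contiguous block of (maxSteps / geometrySampling + 1) rings of 5 vertices, and every ring after the first appends 10 triangles via createTubeIndices. The following host-side sizing sketch is derived from that index arithmetic and is not part of the original file; the struct and function names are illustrative only (a HIP build would use the hipified allocation calls instead).

#include <cuda_runtime.h>

// Sizing sketch for the streamtube output buffers, derived from the kernel's
// indexing: outputPos starts at id * (maxSteps / geometrySampling + 1) * 5 and
// createTubeIndices() emits 10 faces for every ring after the first one.
struct StreamtubeBuffers {           // hypothetical helper struct
    float3* vertices;
    float3* normals;
    float3* colors;
    uint3*  faces;
    unsigned int* computedSteps;
};

static StreamtubeBuffers allocStreamtubeBuffers(unsigned int seedsCount,
                                                unsigned int maxSteps,
                                                unsigned int geometrySampling)
{
    unsigned int ringsPerSeed = maxSteps / geometrySampling + 1;   // 5 vertices per ring
    size_t vertexCount = (size_t)seedsCount * ringsPerSeed * 5;
    size_t faceCount   = (size_t)seedsCount * (ringsPerSeed - 1) * 10;

    StreamtubeBuffers b;
    cudaMalloc((void**)&b.vertices,      vertexCount * sizeof(float3));
    cudaMalloc((void**)&b.normals,       vertexCount * sizeof(float3));
    cudaMalloc((void**)&b.colors,        vertexCount * sizeof(float3));
    cudaMalloc((void**)&b.faces,         faceCount   * sizeof(uint3));
    cudaMalloc((void**)&b.computedSteps, seedsCount  * sizeof(unsigned int));
    return b;
}

After the launch, outComputedSteps reports per seed how many sampled steps were actually produced before the streamline left the volume, so only that prefix of each seed's block contains valid geometry.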
2c69ddec5a1970b61b572e21fe83c122fa40e704.cu
#include "stdafx.h" #include "CudaHelper.h" #include "CudaMathHelper.h" namespace mf { texture<float4, cudaTextureType1D, cudaReadModeElementType> vectorMangitudeCtfTex; // 1D texture for color transfer function. texture<float4, cudaTextureType3D, cudaReadModeElementType> vectorFieldTex; // 3D texture for storing of volumetric vector field. cudaArray* d_volumeArray = nullptr; cudaArray* d_vectorMangitudeCtfData = nullptr; float vectorMangitudeCtfLength; float maxMangitude; extern "C" void initCuda(const float4* h_volume, cudaExtent volumeSize, const std::vector<float4>& vectorMangitudeCtf, float maxVectorMangitude) { { std::cout << "Initializing vector magnitude color transfer function." << std::endl; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>(); checkCudaErrors(cudaMallocArray(&d_vectorMangitudeCtfData, &channelDesc, vectorMangitudeCtf.size(), 1)); checkCudaErrors(cudaMemcpyToArray(d_vectorMangitudeCtfData, 0, 0, &vectorMangitudeCtf[0], sizeof(float4) * vectorMangitudeCtf.size(), cudaMemcpyHostToDevice)); vectorMangitudeCtfTex.normalized = false; vectorMangitudeCtfTex.filterMode = cudaFilterModeLinear; vectorMangitudeCtfTex.addressMode[0] = cudaAddressModeClamp; checkCudaErrors(cudaBindTextureToArray(vectorMangitudeCtfTex, d_vectorMangitudeCtfData, channelDesc)); vectorMangitudeCtfLength = (float)vectorMangitudeCtf.size(); maxMangitude = maxVectorMangitude; } { // allocate 3D array std::cout << "Allocating CUDA 3D array in device." << std::endl; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>(); checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize)); // copy data to 3D array std::cout << "Copying data to device." << std::endl; cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)h_volume, volumeSize.width * sizeof(float4), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); // set texture parameters vectorFieldTex.normalized = false; vectorFieldTex.filterMode = cudaFilterModeLinear; // linear interpolation vectorFieldTex.addressMode[0] = cudaAddressModeClamp; vectorFieldTex.addressMode[1] = cudaAddressModeClamp; vectorFieldTex.addressMode[2] = cudaAddressModeClamp; // bind array to 3D texture std::cout << "Binding 3D texture." << std::endl; checkCudaErrors(cudaBindTextureToArray(vectorFieldTex, d_volumeArray, channelDesc)); std::cout << "Volume data successfully copied to device." 
<< std::endl; } checkCudaErrors(cudaDeviceSynchronize()); } __inline__ __device__ float3 findPerpendicular(float3 v) { /*float ax = abs(v.x); float ay = abs(v.y); float az = abs(v.z); if (ax >= az && ay >= az) { // ax, ay are dominant return make_float3(-v.y, v.x, 0.0f); } else if (ax >= ay && az >= ay) { // ax, az are dominant return make_float3(-v.z, 0.0f, v.x); } else { // ay, az are dominant return make_float3(0.0f, -v.z, v.y); }*/ return make_float3(-v.y, v.x, 0.0f); } __device__ double4 eulerIntegrate(double3 pos, double dt, float3 volumeCoordSpaceMult) { float4 v = tex3D(vectorFieldTex, (float)(pos.x * volumeCoordSpaceMult.x), (float)(pos.y * volumeCoordSpaceMult.y), (float)(pos.z * volumeCoordSpaceMult.z)); return make_double4(dt * v.x, dt * v.y, dt * v.z, v.w); } __device__ double4 rk4Integrate(double3 pos, double dt, float3 volumeCoordSpaceMult) { float4 k1 = tex3D(vectorFieldTex, (float)(pos.x * volumeCoordSpaceMult.x), (float)(pos.y * volumeCoordSpaceMult.y), (float)(pos.z * volumeCoordSpaceMult.z)); double dtHalf = dt * 0.5; float4 k2 = tex3D(vectorFieldTex, (float)((pos.x + dtHalf * k1.x) * volumeCoordSpaceMult.x), (float)((pos.y + dtHalf * k1.y) * volumeCoordSpaceMult.y), (float)((pos.z + dtHalf * k1.z) * volumeCoordSpaceMult.z)); float4 k3 = tex3D(vectorFieldTex, (float)((pos.x + dtHalf * k2.x) * volumeCoordSpaceMult.x), (float)((pos.y + dtHalf * k2.y) * volumeCoordSpaceMult.y), (float)((pos.z + dtHalf * k2.z) * volumeCoordSpaceMult.z)); float4 k4 = tex3D(vectorFieldTex, (float)((pos.x + dt * k3.x) * volumeCoordSpaceMult.x), (float)((pos.y + dt * k3.y) * volumeCoordSpaceMult.y), (float)((pos.z + dt * k3.z) * volumeCoordSpaceMult.z)); double dtSixth = dt / 6.0; return make_double4( dtSixth * ((double)k1.x + 2.0 * ((double)k2.x + (double)k3.x) + (double)k4.x), dtSixth * ((double)k1.y + 2.0 * ((double)k2.y + (double)k3.y) + (double)k4.y), dtSixth * ((double)k1.z + 2.0 * ((double)k2.z + (double)k3.z) + (double)k4.z), ((double)k1.w + 2.0 * ((double)k2.w + (double)k3.w) + (double)k4.w) / 6.0); } __global__ void computeStreamlinesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, cudaExtent volumeSize, float3 volumeCoordSpaceMult, bool useRk4, uint geometrySampling, float mangitudeCtfNormalizeMult, float3* outputPts, uint* outComputedSteps, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (id >= seedsCount) { return; } uint outputPos = id * (maxSteps / geometrySampling + 1); double3 position = make_double3(seeds[id].x, seeds[id].y, seeds[id].z); //printf("[%i] Pos: %f %f %f\n", id, position.x, position.y, position.z); //printf("[%i] World: %i %i %i\n", id, volumeSize.width, volumeSize.height, volumeSize.depth); double3 maxWorld; maxWorld.x = (double)volumeSize.width / volumeCoordSpaceMult.x; maxWorld.y = (double)volumeSize.height / volumeCoordSpaceMult.y; maxWorld.z = (double)volumeSize.depth / volumeCoordSpaceMult.z; //printf("Start pos: %f %f %f\n", position.x, position.y, position.z); outputPts[outputPos].x = (float)position.x; outputPts[outputPos].y = (float)position.y; outputPts[outputPos].z = (float)position.z; ++outputPos; uint geometryStep = geometrySampling; uint step = 1; for (; step < maxSteps; ++step) { if (position.x < 0 || position.y < 0 || position.z < 0 || position.x > maxWorld.x || position.y > maxWorld.y || position.z > maxWorld.z) { //printf("Break at pos: %f %f %f\n", position.x, position.y, position.z); break; } double4 dv = useRk4 ? 
rk4Integrate(position, dt, volumeCoordSpaceMult) : eulerIntegrate(position, dt, volumeCoordSpaceMult); //printf("Vector: %f %f %f\n", dv.x, dv.y, dv.z); position.x += dv.x; position.y += dv.y; position.z += dv.z; --geometryStep; if (geometryStep == 0) { geometryStep = geometrySampling; outputPts[outputPos].x = (float)position.x; outputPts[outputPos].y = (float)position.y; outputPts[outputPos].z = (float)position.z; //printf("New pos: %f %f %f\n", position.x, position.y, position.z); float4 color = tex1D(vectorMangitudeCtfTex, (float)(dv.w * mangitudeCtfNormalizeMult)); //printf("Color (%f -> %f): %f %f %f\n", vector.w, vector.w * mangitudeCtfNormalizeMult, color.x, color.y, color.z); outVertexColors[outputPos - 1].x = color.x; outVertexColors[outputPos - 1].y = color.y; outVertexColors[outputPos - 1].z = color.z; ++outputPos; } } float4 vector = tex3D(vectorFieldTex, (float)(position.x * volumeCoordSpaceMult.x), (float)(position.y * volumeCoordSpaceMult.y), (float)(position.z * volumeCoordSpaceMult.z)); float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); outVertexColors[outputPos - 1].x = color.x; outVertexColors[outputPos - 1].y = color.y; outVertexColors[outputPos - 1].z = color.z; outComputedSteps[id] = step / geometrySampling; } extern "C" void runStreamlinesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, cudaExtent volumeSize, float3 volumeCoordSpaceMult, bool useRk4, uint geometrySampling, float3* outputPts, uint* outComputedSteps, float3* outVertexColors) { ushort threadsCount = 32; uint requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; if (requredBlocksCount > 1024) { threadsCount = 256; requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; } assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; computeStreamlinesLineKernel<<<blocksCount, threadsCount>>>(seeds, seedsCount, dt, maxSteps, volumeSize, volumeCoordSpaceMult, useRk4, geometrySampling, (vectorMangitudeCtfLength / maxMangitude), outputPts, outComputedSteps, outVertexColors); checkCudaErrors(cudaDeviceSynchronize()); } __device__ void createTubeBaseVertices(float3 pos, float3 v, float radius, uint baseIndex, float3 color, float3* outVetrices, float3* outNormals, float3* outColors) { float3 xAxis = normalize(findPerpendicular(v)); float3 yAxis = normalize(cross(v, xAxis)); //printf("vertices %i\n", baseIndex); outNormals[baseIndex] = xAxis; outVetrices[baseIndex] = pos + xAxis * radius; // x * cos(0) + y * sin (0) outColors[baseIndex] = color; ++baseIndex; v = 0.3090f * xAxis + 0.9511f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; // x * cos(72) + y * sin (72) outColors[baseIndex] = color; ++baseIndex; v = -0.8090f * xAxis + 0.5878f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; outColors[baseIndex] = color; ++baseIndex; v = -0.8090f * xAxis - 0.5878f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; // x * cos(216) + y * sin (216) outColors[baseIndex] = color; ++baseIndex; v = 0.3090f * xAxis - 0.9511f * yAxis; outNormals[baseIndex] = v; outVetrices[baseIndex] = pos + v * radius; // x * cos(288) + y * sin (288) outColors[baseIndex] = color; } __device__ void createTubeIndices(uint vertexBaseId, uint baseFaceId, uint3* outFaces) { //printf("v %i, i %i \n", vertexBaseId, baseFaceId); for (uint i = 0; i < 5; ++i) { uint iNext = (i + 1) % 5; outFaces[baseFaceId++] = make_uint3(vertexBaseId + i, vertexBaseId + iNext, 
vertexBaseId - 5 + iNext); outFaces[baseFaceId++] = make_uint3(vertexBaseId + i, vertexBaseId - 5 + i, vertexBaseId - 5 + iNext); } } __global__ void computeStreamtubesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, cudaExtent volumeSize, float3 volumeCoordSpaceMult, float radius, bool useRk4, uint geometrySampling, float mangitudeCtfNormalizeMult, float3* outVetrices, uint* outComputedSteps, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (id >= seedsCount) { return; } uint outputPos = id * (maxSteps / geometrySampling + 1) * 5; //printf("id: %i, maxSteps: %i, outPos: %i \n", id, maxSteps, outputPos); double3 position = make_double3(seeds[id].x, seeds[id].y, seeds[id].z); float4 vector = tex3D(vectorFieldTex, (float)(position.x * volumeCoordSpaceMult.x), (float)(position.y * volumeCoordSpaceMult.y), (float)(position.z * volumeCoordSpaceMult.z)); float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); createTubeBaseVertices(make_float3(position.x, position.y, position.z), make_float3(vector.x, vector.y, vector.z), radius, outputPos, make_float3(color.x, color.y, color.z), outVetrices, outVertexNormals, outVertexColors); outputPos += 5; double3 maxWorld; maxWorld.x = (double)volumeSize.width / volumeCoordSpaceMult.x; maxWorld.y = (double)volumeSize.height / volumeCoordSpaceMult.y; maxWorld.z = (double)volumeSize.depth / volumeCoordSpaceMult.z; uint geometryStep = geometrySampling; uint step = 1; for (; step < maxSteps; ++step) { if (position.x < 0 || position.y < 0 || position.z < 0 || position.x > maxWorld.x || position.y > maxWorld.y || position.z > maxWorld.z) { break; } double4 dv = useRk4 ? rk4Integrate(position, dt, volumeCoordSpaceMult) : eulerIntegrate(position, dt, volumeCoordSpaceMult); position.x += dv.x; position.y += dv.y; position.z += dv.z; --geometryStep; if (geometryStep == 0) { geometryStep = geometrySampling; color = tex1D(vectorMangitudeCtfTex, (float)(dv.w * mangitudeCtfNormalizeMult)); createTubeBaseVertices(make_float3(position.x, position.y, position.z), make_float3(dv.x, dv.y, dv.z), radius, outputPos, make_float3(color.x, color.y, color.z), outVetrices, outVertexNormals, outVertexColors); createTubeIndices(outputPos, (outputPos - 5 * id - 5) * 2, outFaces); outputPos += 5; } } outComputedSteps[id] = step / geometrySampling; } extern "C" void runStreamtubesLineKernel(float3* seeds, uint seedsCount, double dt, uint maxSteps, cudaExtent volumeSize, float3 volumeCoordSpaceMult, float tubeRadius, bool useRk4, uint geometrySampling, float3* outVetrices, uint* outComputedSteps, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { ushort threadsCount = 64; uint requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; if (requredBlocksCount > 1024) { threadsCount = 256; requredBlocksCount = (seedsCount + threadsCount - 1) / threadsCount; } assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; computeStreamtubesLineKernel<<<blocksCount, threadsCount>>>(seeds, seedsCount, dt, maxSteps, volumeSize, volumeCoordSpaceMult, tubeRadius, useRk4, geometrySampling, (vectorMangitudeCtfLength / maxMangitude), outVetrices, outComputedSteps, outFaces, outVertexNormals, outVertexColors); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void computeGlyphLinesKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float3* outputPts, float 
mangitudeCtfNormalizeMult, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint totalCount = __umul24(glyphsCount.x, glyphsCount.y); if (id >= totalCount) { return; } uint col = id % glyphsCount.x; uint row = id / glyphsCount.x; float3 position = make_float3(x, col * (worldSize.x / glyphsCount.x), row * (worldSize.y / glyphsCount.y)); float4 vector = tex3D(vectorFieldTex, position.x * volumeCoordSpaceMult.x, position.y * volumeCoordSpaceMult.y, position.z * volumeCoordSpaceMult.z); id *= 2; outputPts[id] = position; outputPts[id + 1] = position + normalize(make_float3(vector.x, vector.y, vector.z)) * glyphLength * vector.w * mangitudeCtfNormalizeMult * 0.5; float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); outVertexColors[id].x = color.x; outVertexColors[id].y = color.y; outVertexColors[id].z = color.z; outVertexColors[id + 1].x = color.x; outVertexColors[id + 1].y = color.y; outVertexColors[id + 1].z = color.z; } extern "C" void runGlyphLinesKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float3* outputPts, float3* outVertexColors) { ushort threadsCount = 256; uint requredBlocksCount = (glyphsCount.x * glyphsCount.y + threadsCount - 1) / threadsCount; assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; computeGlyphLinesKernel<<<blocksCount, threadsCount>>>(x, glyphsCount, worldSize, glyphLength, volumeCoordSpaceMult, outputPts, (vectorMangitudeCtfLength / maxMangitude), outVertexColors); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void computeGlyphArrowsKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float mangitudeCtfNormalizeMult, float3* outVertices, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint totalCount = __umul24(glyphsCount.x, glyphsCount.y); if (id >= totalCount) { return; } uint col = id % glyphsCount.x; uint row = id / glyphsCount.x; float3 position = make_float3(x, col * (worldSize.x / glyphsCount.x), row * (worldSize.y / glyphsCount.y)); float4 vector = tex3D(vectorFieldTex, position.x * volumeCoordSpaceMult.x, position.y * volumeCoordSpaceMult.y, position.z * volumeCoordSpaceMult.z); float3 forward = normalize(make_float3(vector.x, vector.y, vector.z)); float3 xAxis = normalize(findPerpendicular(forward)); float3 yAxis = normalize(cross(forward, xAxis)); uint faceId = id * 6; uint vertexId = id * 9; outFaces[faceId] = make_uint3(vertexId, vertexId + 1, vertexId + 2); outFaces[faceId + 1] = make_uint3(vertexId, vertexId + 2, vertexId + 3); outFaces[faceId + 2] = make_uint3(vertexId, vertexId + 3, vertexId + 4); outFaces[faceId + 3] = make_uint3(vertexId, vertexId + 4, vertexId + 1); outFaces[faceId + 4] = make_uint3(vertexId + 5, vertexId + 6, vertexId +7); outFaces[faceId + 5] = make_uint3(vertexId + 5, vertexId + 7, vertexId + 8); id *= 9; outVertexNormals[id] = forward; outVertexNormals[id + 1] = xAxis; outVertexNormals[id + 2] = yAxis; outVertexNormals[id + 3] = -xAxis; outVertexNormals[id + 4] = -yAxis; forward *= -1; outVertexNormals[id + 5] = forward; outVertexNormals[id + 6] = forward; outVertexNormals[id + 7] = forward; outVertexNormals[id + 8] = forward; forward *= glyphLength * vector.w * mangitudeCtfNormalizeMult * 0.5; xAxis *= glyphLength * 0.1; yAxis *= glyphLength * 0.1; //printf("Pos: %f %f %f\n", xAxis.x, xAxis.y, xAxis.z); outVertices[id] = position - 
forward; // forward was multiplied by -1 outVertices[id + 1] = position + xAxis; outVertices[id + 2] = position + yAxis; outVertices[id + 3] = position - xAxis; outVertices[id + 4] = position - yAxis; outVertices[id + 5] = position + xAxis; outVertices[id + 6] = position + yAxis; outVertices[id + 7] = position - xAxis; outVertices[id + 8] = position - yAxis; float4 color = tex1D(vectorMangitudeCtfTex, vector.w * mangitudeCtfNormalizeMult); float3 color3 = make_float3(color.x, color.y, color.z); outVertexColors[id] = color3; outVertexColors[id + 1] = color3; outVertexColors[id + 2] = color3; outVertexColors[id + 3] = color3; outVertexColors[id + 4] = color3; outVertexColors[id + 5] = color3; outVertexColors[id + 6] = color3; outVertexColors[id + 7] = color3; outVertexColors[id + 8] = color3; } extern "C" void runGlyphArrowsKernel(float x, uint2 glyphsCount, float2 worldSize, float glyphLength, float3 volumeCoordSpaceMult, float3* outVertices, uint3* outFaces, float3* outVertexNormals, float3* outVertexColors) { ushort threadsCount = 256; uint requredBlocksCount = (glyphsCount.x * glyphsCount.y + threadsCount - 1) / threadsCount; assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; computeGlyphArrowsKernel<<<blocksCount, threadsCount>>>(x, glyphsCount, worldSize, glyphLength, volumeCoordSpaceMult, (vectorMangitudeCtfLength / maxMangitude), outVertices, outFaces, outVertexNormals, outVertexColors); checkCudaErrors(cudaDeviceSynchronize()); } __global__ void computeStreamSurfaceKernel(uint2* linePairs, uint linePairsCount, float3* lineVertices, uint verticesPerLine, uint* lineLengths, uint3* outFaces, uint* outFacesCounts, float3* outNormals) { uint id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (id >= linePairsCount) { //printf("%i: too much\n", id); return; } uint2 currPair = linePairs[id]; uint2 lengths = make_uint2(lineLengths[currPair.x], lineLengths[currPair.y]); if (lengths.x < 2 || lengths.y < 2) { outFacesCounts[id] = 0; //printf("%i: too much\n", id); return; } //printf("%i: lines %i, %i; lengths: %i, %i\n", id, currPair.x, currPair.y, lengths.x ,lengths.y); uint line1Offset = currPair.x * verticesPerLine; uint line2Offset = currPair.y * verticesPerLine; //printf("[%i] vpl: %i\n", id, verticesPerLine); float3* line1 = lineVertices + line1Offset; float3* line2 = lineVertices + line2Offset; float3* normals1 = outNormals + line1Offset; float3* normals2 = outNormals + line2Offset; uint maxFaces = verticesPerLine * 2 - 2; uint3* faces = outFaces + id * maxFaces; uint2 currIndex = make_uint2(0, 0); //float totalMaxAllowedLineDist = max(1.0f, 8.0f * max(length(line1[0] - line2[0]), length(line1[0] - line2[1]))); //float maxAllowedLineDist = 2.0f * length(line1[0] - line2[0]);//length(line1[0] - line2[1]) + length(line2[0] - line1[1]); float lastMinDist = length(line1[0] - line2[0]); uint oneConnections = 0; uint twoConnections = 0; uint maxConnections = 8; uint faceId; for (faceId = 0; faceId < maxFaces; ++faceId) { if (currIndex.x + 1 >= lengths.x || currIndex.y + 1 >= lengths.y) { break; } float dist1 = (currIndex.x + 1 < lengths.x) ? length(line1[currIndex.x + 1] - line2[currIndex.y]) : (1.0f / 0.0f); float dist2 = (currIndex.y + 1 < lengths.y) ? 
length(line1[currIndex.x] - line2[currIndex.y + 1]) : (1.0f / 0.0f); uint newVertexIndex; float3 newVertex; uint2 nextIndex; float minDist; if (dist1 <= dist2) { if (oneConnections > maxConnections) { break; } ++oneConnections; twoConnections = 0; minDist = dist1; newVertexIndex = line1Offset + currIndex.x + 1; newVertex = line1[currIndex.x + 1]; nextIndex = make_uint2(currIndex.x + 1, currIndex.y); } else if (dist2 < dist1) { if (twoConnections > maxConnections) { break; } ++twoConnections; oneConnections = 0; minDist = dist2; newVertexIndex = line2Offset + currIndex.y + 1; newVertex = line2[currIndex.y + 1]; nextIndex = make_uint2(currIndex.x, currIndex.y + 1); } float lenDirect = length(line1[currIndex.x] - line2[currIndex.y]); minDist = min(minDist, lenDirect); //if (/*minDist > maxAllowedLineDist || */minDist > totalMaxAllowedLineDist) { // break; //} float distRatio = minDist / lastMinDist; if (distRatio > 1.5) { //printf("%i: dist ratio %f\n", id, distRatio); break; } //maxAllowedLineDist = (7.0f * maxAllowedLineDist + 2.0f * minDist) / 8.0f; faces[faceId] = make_uint3(line1Offset + currIndex.x, line2Offset + currIndex.y, newVertexIndex); //printf("%i: faceId %i [%i, %i, %i] (dist1: %f, dist2: %f)\n", id, faceId, faces[faceId].x, faces[faceId].y, faces[faceId].z, dist1, dist2); float3 normal = cross(line1[currIndex.x] - line2[currIndex.y], newVertex - line2[currIndex.y]); normal = normalize(normal); normals1[currIndex.x] = normal; normals2[currIndex.y] = normal; currIndex = nextIndex; lastMinDist = (3.0f * lastMinDist + minDist) / 4.0f; } //printf("[%i] faces: %i\n", id, faceId); outFacesCounts[id] = faceId; } extern "C" void runLineStreamSurfaceKernel(uint2* linePairs, uint linePairsCount, float3* lineVertices, uint verticesPerLine, uint* lineLengths, uint3* outFaces, uint* outFacesCounts, float3* outNormals) { ushort threadsCount = 32; uint requredBlocksCount = (linePairsCount + threadsCount - 1) / threadsCount; if (requredBlocksCount > 1024) { threadsCount = 256; requredBlocksCount = (linePairsCount + threadsCount - 1) / threadsCount; } assert(requredBlocksCount < 65536); ushort blocksCount = (ushort)requredBlocksCount; computeStreamSurfaceKernel<<<blocksCount, threadsCount>>>(linePairs, linePairsCount, lineVertices, verticesPerLine, lineLengths, outFaces, outFacesCounts, outNormals); checkCudaErrors(cudaDeviceSynchronize()); } }
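Every extern "C" launcher in the file above repeats the same 1-D launch-size calculation: start with a small block, widen it to 256 threads when more than 1024 blocks would be needed, and require that the resulting grid fits below 65536 blocks. A small helper capturing that pattern is sketched here for reference; it is not part of the original file and the function name is illustrative.

#include <cassert>

// Mirrors the launch-size logic of runStreamlinesLineKernel and friends.
static void pickLaunchSize(unsigned int workItems,
                           unsigned short preferredThreads,
                           unsigned short* threadsOut,
                           unsigned short* blocksOut)
{
    unsigned short threads = preferredThreads;
    unsigned int blocks = (workItems + threads - 1) / threads;
    if (blocks > 1024) {              // too many small blocks: use wider blocks instead
        threads = 256;
        blocks = (workItems + threads - 1) / threads;
    }
    assert(blocks < 65536);           // single grid dimension must stay within limits
    *threadsOut = threads;
    *blocksOut = (unsigned short)blocks;
}

Because the block count is rounded up, each kernel guards against the padding threads with an early "if (id >= count) return;" check, as in the kernels above.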
524ecdbcf63608a960dd9e46d68fad25952a2f4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // __constant__ int opDat0_res_calc_stride_OP2CONSTANT; int opDat0_res_calc_stride_OP2HOST=-1; __constant__ int opDat2_res_calc_stride_OP2CONSTANT; int opDat2_res_calc_stride_OP2HOST=-1; //user function __device__ void res_calc( const float *x1, const float *x2, const float *q1, const float *q2, const float *adt1,const float *adt2,float *res1,float *res2) { float dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0*opDat0_res_calc_stride_OP2CONSTANT] - x2[0*opDat0_res_calc_stride_OP2CONSTANT]; dy = x1[1*opDat0_res_calc_stride_OP2CONSTANT] - x2[1*opDat0_res_calc_stride_OP2CONSTANT]; ri = 1.0f/q1[0*opDat2_res_calc_stride_OP2CONSTANT]; p1 = gm1*(q1[3*opDat2_res_calc_stride_OP2CONSTANT]-0.5f*ri*(q1[1*opDat2_res_calc_stride_OP2CONSTANT]*q1[1*opDat2_res_calc_stride_OP2CONSTANT]+q1[2*opDat2_res_calc_stride_OP2CONSTANT]*q1[2*opDat2_res_calc_stride_OP2CONSTANT])); vol1 = ri*(q1[1*opDat2_res_calc_stride_OP2CONSTANT]*dy - q1[2*opDat2_res_calc_stride_OP2CONSTANT]*dx); ri = 1.0f/q2[0*opDat2_res_calc_stride_OP2CONSTANT]; p2 = gm1*(q2[3*opDat2_res_calc_stride_OP2CONSTANT]-0.5f*ri*(q2[1*opDat2_res_calc_stride_OP2CONSTANT]*q2[1*opDat2_res_calc_stride_OP2CONSTANT]+q2[2*opDat2_res_calc_stride_OP2CONSTANT]*q2[2*opDat2_res_calc_stride_OP2CONSTANT])); vol2 = ri*(q2[1*opDat2_res_calc_stride_OP2CONSTANT]*dy - q2[2*opDat2_res_calc_stride_OP2CONSTANT]*dx); mu = 0.5f*((*adt1)+(*adt2))*eps; f = 0.5f*(vol1* q1[0*opDat2_res_calc_stride_OP2CONSTANT] + vol2* q2[0*opDat2_res_calc_stride_OP2CONSTANT] ) + mu*(q1[0*opDat2_res_calc_stride_OP2CONSTANT]-q2[0*opDat2_res_calc_stride_OP2CONSTANT]); res1[0] += f; res2[0] -= f; f = 0.5f*(vol1* q1[1*opDat2_res_calc_stride_OP2CONSTANT] + p1*dy + vol2* q2[1*opDat2_res_calc_stride_OP2CONSTANT] + p2*dy) + mu*(q1[1*opDat2_res_calc_stride_OP2CONSTANT]-q2[1*opDat2_res_calc_stride_OP2CONSTANT]); res1[1] += f; res2[1] -= f; f = 0.5f*(vol1* q1[2*opDat2_res_calc_stride_OP2CONSTANT] - p1*dx + vol2* q2[2*opDat2_res_calc_stride_OP2CONSTANT] - p2*dx) + mu*(q1[2*opDat2_res_calc_stride_OP2CONSTANT]-q2[2*opDat2_res_calc_stride_OP2CONSTANT]); res1[2] += f; res2[2] -= f; f = 0.5f*(vol1*(q1[3*opDat2_res_calc_stride_OP2CONSTANT]+p1) + vol2*(q2[3*opDat2_res_calc_stride_OP2CONSTANT]+p2) ) + mu*(q1[3*opDat2_res_calc_stride_OP2CONSTANT]-q2[3*opDat2_res_calc_stride_OP2CONSTANT]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const float *__restrict ind_arg2, float *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg6_l[4]; float arg7_l[4]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; int map3idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_float; } for ( 
int d=0; d<4; d++ ){ arg7_l[d] = ZERO_float; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; map3idx = opDat2Map[n + offset_b + set_size * 1]; //user-supplied kernel call res_calc(ind_arg0+map0idx, ind_arg0+map1idx, ind_arg1+map2idx, ind_arg1+map3idx, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg6_l[0] += ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg6_l[1] += ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg6_l[2] += ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg6_l[3] += ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg7_l[0] += ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; arg7_l[1] += ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; arg7_l[2] += ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; arg7_l[3] += ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[0]; ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[1]; ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[2]; ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[3]; ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[0]; ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[1]; ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[2]; ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[3]; } __syncthreads(); } } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } //get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); if ((OP_kernels[2].count==1) || (opDat0_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { opDat0_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg0); hipMemcpyToSymbol(opDat0_res_calc_stride_OP2CONSTANT, &opDat0_res_calc_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[2].count==1) || (opDat2_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg2))) { opDat2_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg2); hipMemcpyToSymbol(opDat2_res_calc_stride_OP2CONSTANT, &opDat2_res_calc_stride_OP2HOST,sizeof(int)); } //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? 
(Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread), 0, 0, (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg4.data_d, (float *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
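The generated res_calc kernel above addresses every field component through the opDat*_res_calc_stride_OP2CONSTANT constants that the host stub copies into constant memory from getSetSizeFromOpArg. That turns the usual array-of-structs access q[4*e + d] into a structure-of-arrays access q[d*stride + e]. The snippet below is a standalone illustration of the two layouts, not OP2 code.

#include <cuda_runtime.h>

// SoA indexing as used by the generated kernel: component d of set element e
// is stored at data[d * stride + e], where stride is the set size uploaded
// into the *_stride_OP2CONSTANT symbol.
__device__ __forceinline__ float soaLoad(const float* data, int e, int d, int stride)
{
    return data[d * stride + e];   // consecutive e values give consecutive addresses
}

// The conventional interleaved (array-of-structs) layout, for comparison.
__device__ __forceinline__ float aosLoad(const float* data, int e, int d, int dim)
{
    return data[e * dim + d];
}

With one thread per edge, the SoA form lets neighbouring threads read neighbouring addresses, which is the usual reason a code generator emits the strided variant for GPUs.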
524ecdbcf63608a960dd9e46d68fad25952a2f4c.cu
// // auto-generated by op2.py // __constant__ int opDat0_res_calc_stride_OP2CONSTANT; int opDat0_res_calc_stride_OP2HOST=-1; __constant__ int opDat2_res_calc_stride_OP2CONSTANT; int opDat2_res_calc_stride_OP2HOST=-1; //user function __device__ void res_calc( const float *x1, const float *x2, const float *q1, const float *q2, const float *adt1,const float *adt2,float *res1,float *res2) { float dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0*opDat0_res_calc_stride_OP2CONSTANT] - x2[0*opDat0_res_calc_stride_OP2CONSTANT]; dy = x1[1*opDat0_res_calc_stride_OP2CONSTANT] - x2[1*opDat0_res_calc_stride_OP2CONSTANT]; ri = 1.0f/q1[0*opDat2_res_calc_stride_OP2CONSTANT]; p1 = gm1*(q1[3*opDat2_res_calc_stride_OP2CONSTANT]-0.5f*ri*(q1[1*opDat2_res_calc_stride_OP2CONSTANT]*q1[1*opDat2_res_calc_stride_OP2CONSTANT]+q1[2*opDat2_res_calc_stride_OP2CONSTANT]*q1[2*opDat2_res_calc_stride_OP2CONSTANT])); vol1 = ri*(q1[1*opDat2_res_calc_stride_OP2CONSTANT]*dy - q1[2*opDat2_res_calc_stride_OP2CONSTANT]*dx); ri = 1.0f/q2[0*opDat2_res_calc_stride_OP2CONSTANT]; p2 = gm1*(q2[3*opDat2_res_calc_stride_OP2CONSTANT]-0.5f*ri*(q2[1*opDat2_res_calc_stride_OP2CONSTANT]*q2[1*opDat2_res_calc_stride_OP2CONSTANT]+q2[2*opDat2_res_calc_stride_OP2CONSTANT]*q2[2*opDat2_res_calc_stride_OP2CONSTANT])); vol2 = ri*(q2[1*opDat2_res_calc_stride_OP2CONSTANT]*dy - q2[2*opDat2_res_calc_stride_OP2CONSTANT]*dx); mu = 0.5f*((*adt1)+(*adt2))*eps; f = 0.5f*(vol1* q1[0*opDat2_res_calc_stride_OP2CONSTANT] + vol2* q2[0*opDat2_res_calc_stride_OP2CONSTANT] ) + mu*(q1[0*opDat2_res_calc_stride_OP2CONSTANT]-q2[0*opDat2_res_calc_stride_OP2CONSTANT]); res1[0] += f; res2[0] -= f; f = 0.5f*(vol1* q1[1*opDat2_res_calc_stride_OP2CONSTANT] + p1*dy + vol2* q2[1*opDat2_res_calc_stride_OP2CONSTANT] + p2*dy) + mu*(q1[1*opDat2_res_calc_stride_OP2CONSTANT]-q2[1*opDat2_res_calc_stride_OP2CONSTANT]); res1[1] += f; res2[1] -= f; f = 0.5f*(vol1* q1[2*opDat2_res_calc_stride_OP2CONSTANT] - p1*dx + vol2* q2[2*opDat2_res_calc_stride_OP2CONSTANT] - p2*dx) + mu*(q1[2*opDat2_res_calc_stride_OP2CONSTANT]-q2[2*opDat2_res_calc_stride_OP2CONSTANT]); res1[2] += f; res2[2] -= f; f = 0.5f*(vol1*(q1[3*opDat2_res_calc_stride_OP2CONSTANT]+p1) + vol2*(q2[3*opDat2_res_calc_stride_OP2CONSTANT]+p2) ) + mu*(q1[3*opDat2_res_calc_stride_OP2CONSTANT]-q2[3*opDat2_res_calc_stride_OP2CONSTANT]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const float *__restrict ind_arg2, float *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg6_l[4]; float arg7_l[4]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; int map3idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_float; } for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_float; } map0idx = opDat0Map[n + offset_b + set_size 
* 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; map3idx = opDat2Map[n + offset_b + set_size * 1]; //user-supplied kernel call res_calc(ind_arg0+map0idx, ind_arg0+map1idx, ind_arg1+map2idx, ind_arg1+map3idx, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg6_l[0] += ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg6_l[1] += ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg6_l[2] += ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg6_l[3] += ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map2idx]; arg7_l[0] += ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; arg7_l[1] += ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; arg7_l[2] += ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; arg7_l[3] += ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map3idx]; ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[0]; ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[1]; ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[2]; ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map2idx] = arg6_l[3]; ind_arg3[0*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[0]; ind_arg3[1*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[1]; ind_arg3[2*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[2]; ind_arg3[3*opDat2_res_calc_stride_OP2CONSTANT+map3idx] = arg7_l[3]; } __syncthreads(); } } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } //get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); if ((OP_kernels[2].count==1) || (opDat0_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg0))) { opDat0_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg0); cudaMemcpyToSymbol(opDat0_res_calc_stride_OP2CONSTANT, &opDat0_res_calc_stride_OP2HOST,sizeof(int)); } if ((OP_kernels[2].count==1) || (opDat2_res_calc_stride_OP2HOST != getSetSizeFromOpArg(&arg2))) { opDat2_res_calc_stride_OP2HOST = getSetSizeFromOpArg(&arg2); cudaMemcpyToSymbol(opDat2_res_calc_stride_OP2CONSTANT, &opDat2_res_calc_stride_OP2HOST,sizeof(int)); } //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? 
(Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { op_cuda_res_calc<<<nblocks,nthread>>>( (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg4.data_d, (float *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
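The host stub above only ever needs a flat block count per plan colour, but folds it into two grid dimensions whenever that count reaches 65536, since gridDim.x is limited to 65535 blocks; op_cuda_res_calc then reconstructs the flat block id and discards the padding blocks. A short standalone sketch of both sides of that folding (names are illustrative):

#include <cuda_runtime.h>

// Host side: fold a flat block count that may exceed 65535 into a 2-D grid,
// exactly as op_par_loop_res_calc does for each plan colour.
static dim3 foldBlocks(int flatBlocks)
{
    return dim3(flatBlocks >= (1 << 16) ? 65535 : flatBlocks,
                flatBlocks >= (1 << 16) ? (flatBlocks - 1) / 65535 + 1 : 1,
                1);
}

// Device side: recover the flat block id and skip the padding blocks, matching
// the early return at the top of op_cuda_res_calc.
__global__ void foldedKernel(int flatBlocks /*, ... */)
{
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    if (blockId >= flatBlocks) return;
    // ... per-block work indexed by blockId ...
}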
e9ef28e253bf26de22948bd9c36269ef49b15434.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "shuffle.h" #include <cuml/linear_model/preprocess_mg.hpp> #include <cuml/solvers/cd_mg.hpp> #include <functions/softThres.cuh> #include <cumlprims/opg/linalg/mv_aTb.hpp> #include <cumlprims/opg/linalg/norm.hpp> #include "shuffle.h" #include <raft/core/comms.hpp> #include <raft/linalg/add.cuh> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/gemm.cuh> #include <raft/linalg/multiply.cuh> #include <raft/linalg/subtract.cuh> #include <raft/matrix/math.cuh> #include <raft/matrix/matrix.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <cstddef> using namespace MLCommon; namespace ML { namespace CD { namespace opg { template <typename T> void fit_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<T>*>& labels, T* coef, T* intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, hipStream_t* streams, int n_streams, bool verbose) { const auto& comm = handle.get_comms(); std::vector<Matrix::RankSizePair*> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); size_t total_M = 0.0; for (std::size_t i = 0; i < partsToRanks.size(); i++) { total_M += partsToRanks[i]->size; } rmm::device_uvector<T> pred(total_M, streams[0]); rmm::device_uvector<T> residual(total_M, streams[0]); rmm::device_uvector<T> squared(input_desc.N, streams[0]); rmm::device_uvector<T> mu_input(0, streams[0]); rmm::device_uvector<T> norm2_input(0, streams[0]); rmm::device_uvector<T> mu_labels(0, streams[0]); std::vector<T> h_coef(input_desc.N, T(0)); if (fit_intercept) { mu_input.resize(input_desc.N, streams[0]); mu_labels.resize(1, streams[0]); if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } GLM::opg::preProcessData(handle, input_data, input_desc, labels, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } std::vector<int> ri(input_desc.N); std::mt19937 g(rand()); size_t memsize = input_desc.N * sizeof(int); int* ri_h = (int*)malloc(memsize); RAFT_CUDA_TRY(hipHostRegister(ri_h, memsize, hipHostRegisterDefault)); if (comm.get_rank() == 0) { ML::Solver::initShuffle(ri, g); for (std::size_t i = 0; i < input_desc.N; i++) { ri_h[i] = ri[i]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; alpha = l1_ratio * alpha * input_desc.M; if (normalize) { T scalar = T(1.0) + l2_alpha; raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]); } else { Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)}; LinAlg::opg::colNorm2NoSeq(handle, squared_data, input_data, input_desc, streams, n_streams); raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]); } std::vector<Matrix::Data<T>*> 
input_data_temp; Matrix::PartDescriptor input_desc_temp = input_desc; input_desc_temp.N = size_t(1); std::vector<Matrix::Data<T>*> residual_temp; Matrix::Data<T> coef_loc_data; T* rs = residual.data(); for (std::size_t i = 0; i < partsToRanks.size(); i++) { raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]); Matrix::Data<T>* rs_data = new Matrix::Data<T>(); rs_data->ptr = rs; rs_data->totalSize = partsToRanks[i]->size; residual_temp.push_back(rs_data); Matrix::Data<T>* temp_data = new Matrix::Data<T>(); temp_data->totalSize = partsToRanks[i]->size; input_data_temp.push_back(temp_data); rs += partsToRanks[i]->size; } for (int i = 0; i < epochs; i++) { if (i > 0 && shuffle) { if (comm.get_rank() == 0) { Solver::shuffle(ri, g); for (std::size_t k = 0; k < input_desc.N; k++) { ri_h[k] = ri[k]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); } T coef_max = 0.0; T d_coef_max = 0.0; T coef_prev = 0.0; for (std::size_t j = 0; j < input_desc.N; j++) { int ci = ri_h[j]; T* coef_loc = coef + ci; T* squared_loc = squared.data() + ci; T* input_col_loc; T* pred_loc = pred.data(); T* residual_loc = residual.data(); for (std::size_t k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); input_data_temp[k]->ptr = input_col_loc; input_data_temp[k]->totalSize = partsToRanks[k]->size; raft::linalg::multiplyScalar( pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::add( residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { handle.sync_stream(streams[k]); } coef_loc_data.ptr = coef_loc; coef_loc_data.totalSize = size_t(1); LinAlg::opg::mv_aTb( handle, coef_loc_data, input_data_temp, input_desc_temp, residual_temp, streams, n_streams); if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); coef_prev = h_coef[ci]; raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); handle.sync_stream(streams[0]); T diff = abs(coef_prev - h_coef[ci]); if (diff > d_coef_max) d_coef_max = diff; if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]); pred_loc = pred.data(); residual_loc = residual.data(); for (std::size_t k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); raft::linalg::multiplyScalar( pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::subtract( residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { handle.sync_stream(streams[k]); } } bool flag_continue = true; if (coef_max == T(0)) { flag_continue = false; } if ((d_coef_max / coef_max) < tol) { flag_continue = false; } if (!flag_continue) { break; } } RAFT_CUDA_TRY(hipHostUnregister(ri_h)); free(ri_h); for (std::size_t i = 0; i < partsToRanks.size(); i++) { delete residual_temp[i]; delete input_data_temp[i]; } if (fit_intercept) { GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef, intercept, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } else { *intercept = T(0); } } /** * @brief 
performs MNMG fit operation for the ols * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param labels: labels data * @output param coef: learned regression coefficients * @output param intercept: intercept value * @input param fit_intercept: fit intercept or not * @input param normalize: normalize the data or not * @input param verbose */ template <typename T> void fit_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<T>*>& labels, T* coef, T* intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); ; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(hipStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { handle.sync_stream(streams[i]); } for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(hipStreamDestroy(streams[i])); } } template <typename T> void predict_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, T* coef, T intercept, std::vector<Matrix::Data<T>*>& preds, hipStream_t* streams, int n_streams, bool verbose) { std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks; T alpha = T(1); T beta = T(0); for (std::size_t i = 0; i < input_data.size(); i++) { int si = i % n_streams; raft::linalg::gemm(handle, input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef, preds[i]->ptr, local_blocks[i]->size, size_t(1), HIPBLAS_OP_N, HIPBLAS_OP_N, alpha, beta, streams[si]); raft::linalg::addScalar( preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); } } template <typename T> void predict_impl(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, size_t n_parts, Matrix::Data<T>** input, size_t n_rows, size_t n_cols, T* coef, T intercept, Matrix::Data<T>** preds, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T>*> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T>*> preds_data(preds, preds + n_parts); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = n_parts; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(hipStreamCreate(&streams[i])); } predict_impl( handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { handle.sync_stream(streams[i]); } for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(hipStreamDestroy(streams[i])); } } void fit(raft::handle_t& handle, std::vector<Matrix::Data<float>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<float>*>& labels, float* coef, float* intercept, bool fit_intercept, bool normalize, int epochs, float 
alpha, float l1_ratio, bool shuffle, float tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void fit(raft::handle_t& handle, std::vector<Matrix::Data<double>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<double>*>& labels, double* coef, double* intercept, bool fit_intercept, bool normalize, int epochs, double alpha, double l1_ratio, bool shuffle, double tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void predict(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, size_t n_parts, Matrix::Data<float>** input, size_t n_rows, size_t n_cols, float* coef, float intercept, Matrix::Data<float>** preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } void predict(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, size_t n_parts, Matrix::Data<double>** input, size_t n_rows, size_t n_cols, double* coef, double intercept, Matrix::Data<double>** preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } } // namespace opg } // namespace CD } // namespace ML
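The inner loop of fit_impl above performs one elastic-net coordinate-descent update per column: it accumulates x_ci^T residual across partitions with mv_aTb, soft-thresholds the result by the scaled l1 penalty when l1_ratio > 0, and divides by the value precomputed in `squared` (the column's squared norm plus the l2 term). The scalar version below is a standalone sketch of that arithmetic for a single coefficient; it is not cuML's Functions::softThres, which operates on device buffers.

// Elastic-net coordinate update for one coefficient, in the same order as
// fit_impl: z = x_ci^T residual, soft-threshold, then divide by
// (||x_ci||^2 + l2 term).
static double softThreshold(double z, double alpha)
{
    if (z > alpha)  return z - alpha;
    if (z < -alpha) return z + alpha;
    return 0.0;                       // coefficient is driven exactly to zero
}

static double coordinateUpdate(double xTr, double alpha, double colSquaredNorm)
{
    double num = softThreshold(xTr, alpha);
    return colSquaredNorm != 0.0 ? num / colSquaredNorm : 0.0;  // mirrors eltwiseDivideCheckZero
}

The convergence test in fit_impl follows the same scalar view: the epoch loop stops once the largest per-coefficient change, relative to the largest coefficient magnitude, drops below tol.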
e9ef28e253bf26de22948bd9c36269ef49b15434.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "shuffle.h" #include <cuml/linear_model/preprocess_mg.hpp> #include <cuml/solvers/cd_mg.hpp> #include <functions/softThres.cuh> #include <cumlprims/opg/linalg/mv_aTb.hpp> #include <cumlprims/opg/linalg/norm.hpp> #include "shuffle.h" #include <raft/core/comms.hpp> #include <raft/linalg/add.cuh> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/gemm.cuh> #include <raft/linalg/multiply.cuh> #include <raft/linalg/subtract.cuh> #include <raft/matrix/math.cuh> #include <raft/matrix/matrix.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <cstddef> using namespace MLCommon; namespace ML { namespace CD { namespace opg { template <typename T> void fit_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<T>*>& labels, T* coef, T* intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, cudaStream_t* streams, int n_streams, bool verbose) { const auto& comm = handle.get_comms(); std::vector<Matrix::RankSizePair*> partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); size_t total_M = 0.0; for (std::size_t i = 0; i < partsToRanks.size(); i++) { total_M += partsToRanks[i]->size; } rmm::device_uvector<T> pred(total_M, streams[0]); rmm::device_uvector<T> residual(total_M, streams[0]); rmm::device_uvector<T> squared(input_desc.N, streams[0]); rmm::device_uvector<T> mu_input(0, streams[0]); rmm::device_uvector<T> norm2_input(0, streams[0]); rmm::device_uvector<T> mu_labels(0, streams[0]); std::vector<T> h_coef(input_desc.N, T(0)); if (fit_intercept) { mu_input.resize(input_desc.N, streams[0]); mu_labels.resize(1, streams[0]); if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } GLM::opg::preProcessData(handle, input_data, input_desc, labels, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } std::vector<int> ri(input_desc.N); std::mt19937 g(rand()); size_t memsize = input_desc.N * sizeof(int); int* ri_h = (int*)malloc(memsize); RAFT_CUDA_TRY(cudaHostRegister(ri_h, memsize, cudaHostRegisterDefault)); if (comm.get_rank() == 0) { ML::Solver::initShuffle(ri, g); for (std::size_t i = 0; i < input_desc.N; i++) { ri_h[i] = ri[i]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; alpha = l1_ratio * alpha * input_desc.M; if (normalize) { T scalar = T(1.0) + l2_alpha; raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, streams[0]); } else { Matrix::Data<T> squared_data{squared.data(), size_t(input_desc.N)}; LinAlg::opg::colNorm2NoSeq(handle, squared_data, input_data, input_desc, streams, n_streams); raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]); } std::vector<Matrix::Data<T>*> input_data_temp; Matrix::PartDescriptor input_desc_temp 
= input_desc; input_desc_temp.N = size_t(1); std::vector<Matrix::Data<T>*> residual_temp; Matrix::Data<T> coef_loc_data; T* rs = residual.data(); for (std::size_t i = 0; i < partsToRanks.size(); i++) { raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]); Matrix::Data<T>* rs_data = new Matrix::Data<T>(); rs_data->ptr = rs; rs_data->totalSize = partsToRanks[i]->size; residual_temp.push_back(rs_data); Matrix::Data<T>* temp_data = new Matrix::Data<T>(); temp_data->totalSize = partsToRanks[i]->size; input_data_temp.push_back(temp_data); rs += partsToRanks[i]->size; } for (int i = 0; i < epochs; i++) { if (i > 0 && shuffle) { if (comm.get_rank() == 0) { Solver::shuffle(ri, g); for (std::size_t k = 0; k < input_desc.N; k++) { ri_h[k] = ri[k]; } } comm.bcast(ri_h, input_desc.N, 0, streams[0]); comm.sync_stream(streams[0]); } T coef_max = 0.0; T d_coef_max = 0.0; T coef_prev = 0.0; for (std::size_t j = 0; j < input_desc.N; j++) { int ci = ri_h[j]; T* coef_loc = coef + ci; T* squared_loc = squared.data() + ci; T* input_col_loc; T* pred_loc = pred.data(); T* residual_loc = residual.data(); for (std::size_t k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); input_data_temp[k]->ptr = input_col_loc; input_data_temp[k]->totalSize = partsToRanks[k]->size; raft::linalg::multiplyScalar( pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::add( residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { handle.sync_stream(streams[k]); } coef_loc_data.ptr = coef_loc; coef_loc_data.totalSize = size_t(1); LinAlg::opg::mv_aTb( handle, coef_loc_data, input_data_temp, input_desc_temp, residual_temp, streams, n_streams); if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); coef_prev = h_coef[ci]; raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); handle.sync_stream(streams[0]); T diff = abs(coef_prev - h_coef[ci]); if (diff > d_coef_max) d_coef_max = diff; if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]); pred_loc = pred.data(); residual_loc = residual.data(); for (std::size_t k = 0; k < input_data.size(); k++) { input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); raft::linalg::multiplyScalar( pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); raft::linalg::subtract( residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); pred_loc = pred_loc + partsToRanks[k]->size; residual_loc = residual_loc + partsToRanks[k]->size; } for (int k = 0; k < n_streams; k++) { handle.sync_stream(streams[k]); } } bool flag_continue = true; if (coef_max == T(0)) { flag_continue = false; } if ((d_coef_max / coef_max) < tol) { flag_continue = false; } if (!flag_continue) { break; } } RAFT_CUDA_TRY(cudaHostUnregister(ri_h)); free(ri_h); for (std::size_t i = 0; i < partsToRanks.size(); i++) { delete residual_temp[i]; delete input_data_temp[i]; } if (fit_intercept) { GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef, intercept, mu_input.data(), mu_labels.data(), norm2_input.data(), fit_intercept, normalize, streams, n_streams, verbose); } else { *intercept = T(0); } } /** * @brief performs MNMG fit operation for the ols * @input param 
handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param labels: labels data * @output param coef: learned regression coefficients * @output param intercept: intercept value * @input param fit_intercept: fit intercept or not * @input param normalize: normalize the data or not * @input param verbose */ template <typename T> void fit_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<T>*>& labels, T* coef, T* intercept, bool fit_intercept, bool normalize, int epochs, T alpha, T l1_ratio, bool shuffle, T tol, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); ; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(cudaStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { handle.sync_stream(streams[i]); } for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(cudaStreamDestroy(streams[i])); } } template <typename T> void predict_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, T* coef, T intercept, std::vector<Matrix::Data<T>*>& preds, cudaStream_t* streams, int n_streams, bool verbose) { std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks; T alpha = T(1); T beta = T(0); for (std::size_t i = 0; i < input_data.size(); i++) { int si = i % n_streams; raft::linalg::gemm(handle, input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef, preds[i]->ptr, local_blocks[i]->size, size_t(1), CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, streams[si]); raft::linalg::addScalar( preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); } } template <typename T> void predict_impl(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, size_t n_parts, Matrix::Data<T>** input, size_t n_rows, size_t n_cols, T* coef, T intercept, Matrix::Data<T>** preds, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T>*> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T>*> preds_data(preds, preds + n_parts); // TODO: These streams should come from raft::handle_t // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 int n_streams = n_parts; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(cudaStreamCreate(&streams[i])); } predict_impl( handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { handle.sync_stream(streams[i]); } for (int i = 0; i < n_streams; i++) { RAFT_CUDA_TRY(cudaStreamDestroy(streams[i])); } } void fit(raft::handle_t& handle, std::vector<Matrix::Data<float>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<float>*>& labels, float* coef, float* intercept, bool fit_intercept, bool normalize, int epochs, float alpha, float l1_ratio, bool shuffle, float tol, 
bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void fit(raft::handle_t& handle, std::vector<Matrix::Data<double>*>& input_data, Matrix::PartDescriptor& input_desc, std::vector<Matrix::Data<double>*>& labels, double* coef, double* intercept, bool fit_intercept, bool normalize, int epochs, double alpha, double l1_ratio, bool shuffle, double tol, bool verbose) { fit_impl(handle, input_data, input_desc, labels, coef, intercept, fit_intercept, normalize, epochs, alpha, l1_ratio, shuffle, tol, verbose); } void predict(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, size_t n_parts, Matrix::Data<float>** input, size_t n_rows, size_t n_cols, float* coef, float intercept, Matrix::Data<float>** preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } void predict(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, size_t n_parts, Matrix::Data<double>** input, size_t n_rows, size_t n_cols, double* coef, double intercept, Matrix::Data<double>** preds, bool verbose) { predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); } } // namespace opg } // namespace CD } // namespace ML
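/* Illustration only: a stand-alone sketch of the elastic-net coordinate update
 * that the inner loop above delegates to Functions::softThres followed by
 * raft::linalg::eltwiseDivideCheckZero, i.e.
 *   coef_j = sign(c) * max(|c| - alpha, 0) / squared_j   (and 0 when squared_j == 0).
 * Names and sizes here are hypothetical; the real code updates one coefficient
 * at a time on streams[0]. */
#include <cuda_runtime.h>
#include <cstdio>

__global__ void soft_threshold_divide(float* coef, const float* denom, float alpha, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float c = coef[i];
  float s = fabsf(c) - alpha;
  float num = (s > 0.f) ? copysignf(s, c) : 0.f;        // soft thresholding
  coef[i] = (denom[i] != 0.f) ? num / denom[i] : 0.f;   // divide, guarding zero
}

int main()
{
  const int n = 4;
  float h_coef[n]  = {2.0f, -0.5f, 0.3f, -3.0f};
  float h_denom[n] = {1.0f,  2.0f, 0.0f,  4.0f};
  float *d_coef, *d_denom;
  cudaMalloc(&d_coef, n * sizeof(float));
  cudaMalloc(&d_denom, n * sizeof(float));
  cudaMemcpy(d_coef, h_coef, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_denom, h_denom, n * sizeof(float), cudaMemcpyHostToDevice);

  soft_threshold_divide<<<1, 32>>>(d_coef, d_denom, /*alpha=*/1.0f, n);
  cudaMemcpy(h_coef, d_coef, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++) printf("%f\n", h_coef[i]);  // expect 1, 0, 0, -0.5
  cudaFree(d_coef);
  cudaFree(d_denom);
  return 0;
}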
6aa57cb5893525cab4222edc33a5cec9505b3368.hip
// !!! This is a file automatically generated by hipify!!!
#define N 100000
#include "../COMMON/commons.cuh"

int main(int argc, char **argv){
    float *d_arr;

    // Memory allocation
    CUDA_CHECK(hipMalloc(&d_arr, N * sizeof(float)));
    // Memory set
    CUDA_CHECK(hipMemset(d_arr, 0, N * sizeof(float)));

    float *h_arr = new float[N]();
    for (int i = 0; i < N; i++){
        h_arr[i] = (float) i;
    }

    // Memory Copy to GPU (destination, source, size, direction)
    CUDA_CHECK(hipMemcpy(d_arr, h_arr, N * sizeof(float), hipMemcpyHostToDevice));
    // Works also thanks to UVA
    CUDA_CHECK(hipMemcpy(d_arr, h_arr, N * sizeof(float), hipMemcpyDefault));

    //test<<<1,1>>>(h_arr);
    CUDA_CHECK(hipMemcpy(h_arr, d_arr, N * sizeof(float), D2H));

    CUDA_CHECK(hipFree(d_arr));
    CUDA_CHECK(hipDeviceSynchronize());

    return 0;
}
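/* The "Works also thanks to UVA" comment in the example above refers to unified
 * virtual addressing: when UVA is available the runtime can tell host pointers
 * from device pointers, so the Default memcpy kind infers the copy direction on
 * its own.  A minimal check of that property, written against the CUDA runtime
 * (hipMemcpyDefault plays the analogous role in the HIP build); illustration
 * only, not part of the original example. */
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  printf("unifiedAddressing = %d\n", prop.unifiedAddressing);

  float *h = new float[16](), *d = nullptr;
  cudaMalloc(&d, 16 * sizeof(float));
  // With UVA enabled, the direction is deduced from the pointer kinds.
  cudaMemcpy(d, h, 16 * sizeof(float), cudaMemcpyDefault);
  cudaMemcpy(h, d, 16 * sizeof(float), cudaMemcpyDefault);
  cudaFree(d);
  delete[] h;
  return 0;
}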
6aa57cb5893525cab4222edc33a5cec9505b3368.cu
#define N 100000
#include "../COMMON/commons.cuh"

int main(int argc, char **argv){
    float *d_arr;

    // Memory allocation
    CUDA_CHECK(cudaMalloc(&d_arr, N * sizeof(float)));
    // Memory set
    CUDA_CHECK(cudaMemset(d_arr, 0, N * sizeof(float)));

    float *h_arr = new float[N]();
    for (int i = 0; i < N; i++){
        h_arr[i] = (float) i;
    }

    // Memory Copy to GPU (destination, source, size, direction)
    CUDA_CHECK(cudaMemcpy(d_arr, h_arr, N * sizeof(float), cudaMemcpyHostToDevice));
    // Works also thanks to UVA
    CUDA_CHECK(cudaMemcpy(d_arr, h_arr, N * sizeof(float), cudaMemcpyDefault));

    //test<<<1,1>>>(h_arr);
    CUDA_CHECK(cudaMemcpy(h_arr, d_arr, N * sizeof(float), D2H));

    CUDA_CHECK(cudaFree(d_arr));
    CUDA_CHECK(cudaDeviceSynchronize());

    return 0;
}
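/* Both examples above rely on CUDA_CHECK and D2H from ../COMMON/commons.cuh,
 * which is not included in this dump.  Below is a plausible, minimal
 * reconstruction of those two pieces (an assumption -- the real header may be
 * defined differently). */
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Abort with file/line information if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                     \
  do {                                                                       \
    cudaError_t err_ = (call);                                               \
    if (err_ != cudaSuccess) {                                               \
      fprintf(stderr, "CUDA error '%s' at %s:%d\n",                          \
              cudaGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                    \
    }                                                                        \
  } while (0)

// Shorthand copy directions; only D2H is exercised by the examples above.
#define H2D cudaMemcpyHostToDevice
#define D2H cudaMemcpyDeviceToHost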
862f0f0bd38864259d24dc7b46d3d078393ef215.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // DOUBLE PRECISION KERNEL for even/odd fermions __global__ void DslashDDKernelEO(double2 *out, double2 *in, int *tables, int *phases, size_t gauge_offset) { int idx = blockIdx.x * blockDim.x + threadIdx.x + size_dev_h; // idx>sizeh, ODD double stag_phase = 1.0; //Store result in sharedMem __shared__ double ferm_out[3][2][NUM_THREADS]; //New tables indexing (index fastest) __shared__ int site_table[NUM_THREADS]; //Load link matrix U_mu(ix) in registers double link0x, link0y, link0z, link0w, link1x, link1y, link1z, link1w, link2x, link2y, link2z, link2w; float4 auxlink; double2 ferm_in_0, ferm_in_1, ferm_in_2; #ifdef IM_CHEM_POT double2 ferm_aux_0, ferm_aux_1, ferm_aux_2; #endif // DIRECTION 0 site_table[threadIdx.x] = tables[idx+4*size_dev]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] = C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] = C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+5*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+size_dev]); #else stag_phase = (double) phases[idx+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; 
link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+6*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+2*size_dev]); #else stag_phase = (double) phases[idx+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ 
link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+7*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+3*size_dev]); #else stag_phase = (double) phases[idx+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_aux_0.y = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_aux_1.x = 
link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_aux_1.y = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_aux_2.x = stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_aux_2.y = stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_d - ferm_aux_0.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_d - ferm_aux_1.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_d - ferm_aux_2.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_2] #endif //---------------------------------------------------end of first block //DIRECTION 0 site_table[threadIdx.x] = tables[idx]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + C1RED*ferm_in_2.x +C1IMD*ferm_in_2.y; ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + C1RED*ferm_in_2.y -C1IMD*ferm_in_2.x; ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + C2RED*ferm_in_2.x +C2IMD*ferm_in_2.y; ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + C2RED*ferm_in_2.y -C2IMD*ferm_in_2.x; ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + C3RED*ferm_in_2.x +C3IMD*ferm_in_2.y; 
ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + C3RED*ferm_in_2.y -C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y-C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y-C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+2*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+2*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; 
link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+3*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+3*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) 
auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_aux_0.y = link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_aux_1.x = link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_aux_1.y = link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_aux_2.x = link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_aux_2.y = link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_d + ferm_aux_0.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_d + ferm_aux_1.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_d + ferm_aux_2.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_2] #endif //-------------------------------------------------end of second block // even ferm_in_0 = in[ idx - size_dev_h]; ferm_in_1 = in[ size_dev + idx - size_dev_h]; ferm_in_2 = in[ 2*size_dev + idx - size_dev_h]; out[idx - size_dev_h ].x = mass_d_dev*ferm_in_0.x; out[idx - size_dev_h ].y = mass_d_dev*ferm_in_0.y; out[idx + size_dev - size_dev_h ].x = mass_d_dev*ferm_in_1.x; out[idx + size_dev - size_dev_h ].y = mass_d_dev*ferm_in_1.y; out[idx + 2*size_dev - size_dev_h ].x = mass_d_dev*ferm_in_2.x; out[idx + 2*size_dev - size_dev_h ].y = mass_d_dev*ferm_in_2.y; //odd out[idx ].x = ferm_out[0][0][threadIdx.x]*(double)0.5; out[idx ].y = ferm_out[0][1][threadIdx.x]*(double)0.5; out[idx + size_dev ].x = ferm_out[1][0][threadIdx.x]*(double)0.5; out[idx + size_dev ].y = ferm_out[1][1][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].x = ferm_out[2][0][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].y = 
ferm_out[2][1][threadIdx.x]*(double)0.5; //-------------------------------------------------end of Dslash } __global__ void DslashDaggerDDKernelEO(double2 *out, double2 *in, int *tables, int *phases, size_t gauge_offset) { int idx = blockIdx.x*blockDim.x + threadIdx.x; // idx< sizeh, EVEN!! double stag_phase = 1.0; //Store result in sharedMem __shared__ double ferm_out[3][2][NUM_THREADS]; #ifdef IM_CHEM_POT double2 ferm_aux_0, ferm_aux_1, ferm_aux_2; #endif //New tables indexing (index fastest) __shared__ int site_table[NUM_THREADS]; //Load link matrix U_mu(ix) in registers double link0x, link0y, link0z, link0w, link1x, link1y, link1z, link1w, link2x, link2y, link2z, link2w; float4 auxlink; double2 ferm_in_0, ferm_in_1, ferm_in_2; // DIRECTION 0 site_table[threadIdx.x] = tables[idx+4*size_dev]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] = C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] = C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+5*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+size_dev]); #else stag_phase = (double) phases[idx+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = 
tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+6*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+2*size_dev]); #else stag_phase = (double) phases[idx+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; 
ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+7*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+3*size_dev]); #else stag_phase = (double) phases[idx+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_aux_0.y = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_aux_1.x = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ 
link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_aux_1.y = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_aux_2.x = stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_aux_2.y = stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_d - ferm_aux_0.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_d - ferm_aux_1.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_d - ferm_aux_2.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_2] #endif //---------------------------------------------------end of first block //DIRECTION 0 site_table[threadIdx.x] = tables[idx]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + C1RED*ferm_in_2.x +C1IMD*ferm_in_2.y; ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + C1RED*ferm_in_2.y -C1IMD*ferm_in_2.x; ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + C2RED*ferm_in_2.x +C2IMD*ferm_in_2.y; ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + C2RED*ferm_in_2.y -C2IMD*ferm_in_2.x; ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + C3RED*ferm_in_2.x +C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] -= 
link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + C3RED*ferm_in_2.y -C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y-C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y-C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+2*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+2*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; 
link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+3*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+3*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) 
auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_aux_0.y = link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_aux_1.x = link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_aux_1.y = link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_aux_2.x = link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_aux_2.y = link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_d + ferm_aux_0.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_d + ferm_aux_1.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_d + ferm_aux_2.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_2] #endif //-------------------------------------------------end of second block // even ferm_in_0 = in[ idx]; ferm_in_1 = in[ size_dev + idx]; ferm_in_2 = in[ 2*size_dev + idx]; out[idx ].x = mass_d_dev*ferm_in_0.x - ferm_out[0][0][threadIdx.x]*(double)0.5; out[idx ].y = mass_d_dev*ferm_in_0.y - ferm_out[0][1][threadIdx.x]*(double)0.5; out[idx + size_dev ].x = mass_d_dev*ferm_in_1.x - ferm_out[1][0][threadIdx.x]*(double)0.5; out[idx + size_dev ].y = mass_d_dev*ferm_in_1.y - ferm_out[1][1][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].x = mass_d_dev*ferm_in_2.x - ferm_out[2][0][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].y = mass_d_dev*ferm_in_2.y - ferm_out[2][1][threadIdx.x]*(double)0.5; // odd out[idx + size_dev_h ].x = (double)0.0; out[idx + size_dev_h ].y = (double)0.0; out[idx + size_dev + size_dev_h ].x = (double)0.0; out[idx + size_dev + size_dev_h ].y = (double)0.0; out[idx + 2*size_dev + size_dev_h ].x = (double)0.0; out[idx + 2*size_dev + 
size_dev_h ].y = (double)0.0;
  //-------------------------------------------------end of DslashDagger
  }


/*
=================================================================
  EXTERNAL C FUNCTION
*/

void DslashOperatorDDEO(double2 *out,
                        double2 *in,
                        const int isign)
  {
#ifdef DEBUG_MODE_2
  printf("\033[32mDEBUG: inside DslashOperatorDDEO ...\033[0m\n");
#endif

  dim3 BlockDimension(NUM_THREADS);
  dim3 GridDimension(sizeh/BlockDimension.x); //Half sites

  size_t gauge_field_size = sizeof(float4)*size*12;
  size_t offset_g;

  cudaSafe(AT,hipBindTexture(&offset_g, gauge_texRef, gauge_field_device, 2*gauge_field_size), "hipBindTexture");
  offset_g/=sizeof(float4);

  if(isign == PLUS)
    {
    hipLaunchKernelGGL(( DslashDDKernelEO), dim3(GridDimension),dim3(BlockDimension), 0, 0, out, in, device_table, device_phases, offset_g);
    cudaCheckError(AT,"DslashDDKernelEO");
    }

  if(isign == MINUS)
    {
    hipLaunchKernelGGL(( DslashDaggerDDKernelEO), dim3(GridDimension),dim3(BlockDimension), 0, 0, out, in, device_table, device_phases, offset_g);
    cudaCheckError(AT,"DslashDaggerDDKernelEO");
    }

  cudaSafe(AT,hipUnbindTexture(gauge_texRef), "hipUnbindTexture");

#ifdef DEBUG_MODE_2
  printf("\033[32m\tterminated DslashOperatorDDEO \033[0m\n");
#endif
  }
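// Hypothetical usage sketch (variable names are illustrative, not from the original source):
// the launcher applies the even/odd staggered Dslash (isign == PLUS) or its dagger
// (isign == MINUS) to a device fermion field, e.g.
//
//   DslashOperatorDDEO(chi_dev, psi_dev, PLUS);    // chi = D   psi
//   DslashOperatorDDEO(phi_dev, chi_dev, MINUS);   // phi = D^+ chi
//
// where chi_dev/psi_dev/phi_dev are assumed to be double2 device arrays holding the
// three colour components contiguously (in[site], in[size_dev+site], in[2*size_dev+site]),
// exactly as the kernels above index them.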
862f0f0bd38864259d24dc7b46d3d078393ef215.cu
// DOUBLE PRECISION KERNEL for even/odd fermions __global__ void DslashDDKernelEO(double2 *out, double2 *in, int *tables, int *phases, size_t gauge_offset) { int idx = blockIdx.x * blockDim.x + threadIdx.x + size_dev_h; // idx>sizeh, ODD double stag_phase = 1.0; //Store result in sharedMem __shared__ double ferm_out[3][2][NUM_THREADS]; //New tables indexing (index fastest) __shared__ int site_table[NUM_THREADS]; //Load link matrix U_mu(ix) in registers double link0x, link0y, link0z, link0w, link1x, link1y, link1z, link1w, link2x, link2y, link2z, link2w; float4 auxlink; double2 ferm_in_0, ferm_in_1, ferm_in_2; #ifdef IM_CHEM_POT double2 ferm_aux_0, ferm_aux_1, ferm_aux_2; #endif // DIRECTION 0 site_table[threadIdx.x] = tables[idx+4*size_dev]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] = C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] = C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+5*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+size_dev]); #else stag_phase = (double) phases[idx+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + 
size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+6*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+2*size_dev]); #else stag_phase = (double) phases[idx+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += 
link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+7*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+3*size_dev]); #else stag_phase = (double) phases[idx+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_aux_0.y = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_aux_1.x = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ 
link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_aux_1.y = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_aux_2.x = stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_aux_2.y = stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_d - ferm_aux_0.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_d - ferm_aux_1.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_d - ferm_aux_2.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_2] #endif //---------------------------------------------------end of first block //DIRECTION 0 site_table[threadIdx.x] = tables[idx]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + C1RED*ferm_in_2.x +C1IMD*ferm_in_2.y; ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + C1RED*ferm_in_2.y -C1IMD*ferm_in_2.x; ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + C2RED*ferm_in_2.x +C2IMD*ferm_in_2.y; ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + C2RED*ferm_in_2.y -C2IMD*ferm_in_2.x; ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + C3RED*ferm_in_2.x +C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + 
link2z*ferm_in_1.y-link2w*ferm_in_1.x + C3RED*ferm_in_2.y -C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y-C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y-C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+2*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+2*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = 
tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+3*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+3*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT 
ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_aux_0.y = link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_aux_1.x = link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_aux_1.y = link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_aux_2.x = link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_aux_2.y = link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_d + ferm_aux_0.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_d + ferm_aux_1.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_d + ferm_aux_2.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_2] #endif //-------------------------------------------------end of second block // even ferm_in_0 = in[ idx - size_dev_h]; ferm_in_1 = in[ size_dev + idx - size_dev_h]; ferm_in_2 = in[ 2*size_dev + idx - size_dev_h]; out[idx - size_dev_h ].x = mass_d_dev*ferm_in_0.x; out[idx - size_dev_h ].y = mass_d_dev*ferm_in_0.y; out[idx + size_dev - size_dev_h ].x = mass_d_dev*ferm_in_1.x; out[idx + size_dev - size_dev_h ].y = mass_d_dev*ferm_in_1.y; out[idx + 2*size_dev - size_dev_h ].x = mass_d_dev*ferm_in_2.x; out[idx + 2*size_dev - size_dev_h ].y = mass_d_dev*ferm_in_2.y; //odd out[idx ].x = ferm_out[0][0][threadIdx.x]*(double)0.5; out[idx ].y = ferm_out[0][1][threadIdx.x]*(double)0.5; out[idx + size_dev ].x = ferm_out[1][0][threadIdx.x]*(double)0.5; out[idx + size_dev ].y = ferm_out[1][1][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].x = ferm_out[2][0][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].y = ferm_out[2][1][threadIdx.x]*(double)0.5; //-------------------------------------------------end 
of Dslash } __global__ void DslashDaggerDDKernelEO(double2 *out, double2 *in, int *tables, int *phases, size_t gauge_offset) { int idx = blockIdx.x*blockDim.x + threadIdx.x; // idx< sizeh, EVEN!! double stag_phase = 1.0; //Store result in sharedMem __shared__ double ferm_out[3][2][NUM_THREADS]; #ifdef IM_CHEM_POT double2 ferm_aux_0, ferm_aux_1, ferm_aux_2; #endif //New tables indexing (index fastest) __shared__ int site_table[NUM_THREADS]; //Load link matrix U_mu(ix) in registers double link0x, link0y, link0z, link0w, link1x, link1y, link1z, link1w, link2x, link2y, link2z, link2w; float4 auxlink; double2 ferm_in_0, ferm_in_1, ferm_in_2; // DIRECTION 0 site_table[threadIdx.x] = tables[idx+4*size_dev]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] = link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] = C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] = C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x; //DIRECTION 1 site_table[threadIdx.x] = tables[idx+5*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+size_dev]); #else stag_phase = (double) phases[idx+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; 
link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+6*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+2*size_dev]); #else stag_phase = (double) phases[idx+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*2)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ 
link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+7*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[idx+3*size_dev]); #else stag_phase = (double) phases[idx+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, idx + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + idx + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] += link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_out[0][1][threadIdx.x] += link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_out[1][0][threadIdx.x] += link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_out[1][1][threadIdx.x] += link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_out[2][0][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] += stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x-link0y*ferm_in_0.y+ link0z*ferm_in_1.x-link0w*ferm_in_1.y+ link1x*ferm_in_2.x-link1y*ferm_in_2.y; ferm_aux_0.y = link0x*ferm_in_0.y+link0y*ferm_in_0.x+ link0z*ferm_in_1.y+link0w*ferm_in_1.x+ link1x*ferm_in_2.y+link1y*ferm_in_2.x; ferm_aux_1.x = link1z*ferm_in_0.x-link1w*ferm_in_0.y+ link2x*ferm_in_1.x-link2y*ferm_in_1.y+ link2z*ferm_in_2.x-link2w*ferm_in_2.y; ferm_aux_1.y = 
link1z*ferm_in_0.y+link1w*ferm_in_0.x+ link2x*ferm_in_1.y+link2y*ferm_in_1.x+ link2z*ferm_in_2.y+link2w*ferm_in_2.x; ferm_aux_2.x = stag_phase*(C1RED*ferm_in_0.x-C1IMD*ferm_in_0.y+ C2RED*ferm_in_1.x-C2IMD*ferm_in_1.y+ C3RED*ferm_in_2.x-C3IMD*ferm_in_2.y); ferm_aux_2.y = stag_phase*(C1RED*ferm_in_0.y+C1IMD*ferm_in_0.x+ C2RED*ferm_in_1.y+C2IMD*ferm_in_1.x+ C3RED*ferm_in_2.y+C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] += ferm_aux_0.x*dev_eim_cos_d - ferm_aux_0.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] += ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] += ferm_aux_1.x*dev_eim_cos_d - ferm_aux_1.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] += ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] += ferm_aux_2.x*dev_eim_cos_d - ferm_aux_2.y*dev_eim_sin_d; // Re[e^{imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] += ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{imu}*ferm_aux_2] #endif //---------------------------------------------------end of first block //DIRECTION 0 site_table[threadIdx.x] = tables[idx]; ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*0)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*0)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*0)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + C1RED*ferm_in_2.x +C1IMD*ferm_in_2.y; ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + C1RED*ferm_in_2.y -C1IMD*ferm_in_2.x; ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + C2RED*ferm_in_2.x +C2IMD*ferm_in_2.y; ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + C2RED*ferm_in_2.y -C2IMD*ferm_in_2.x; ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + C3RED*ferm_in_2.x +C3IMD*ferm_in_2.y; ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + C3RED*ferm_in_2.y -C3IMD*ferm_in_2.x; 
//DIRECTION 1 site_table[threadIdx.x] = tables[idx+size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*1)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*1)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*1)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y-C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y-C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 2 site_table[threadIdx.x] = tables[idx+2*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+2*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+2*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); 
link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*2)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*2)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*2)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); //DIRECTION 3 site_table[threadIdx.x] = tables[idx+3*size_dev]; #ifdef USE_INTRINSIC stag_phase = __int2double_rn(phases[site_table[threadIdx.x]+3*size_dev]); #else stag_phase = (double) phases[site_table[threadIdx.x]+3*size_dev]; #endif ferm_in_0 = in[ site_table[threadIdx.x]]; ferm_in_1 = in[ size_dev + site_table[threadIdx.x]]; ferm_in_2 = in[ 2*size_dev + site_table[threadIdx.x]]; // 1st float auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x=(double) auxlink.x; link0y=(double) auxlink.y; link0z=(double) auxlink.z; link0w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x=(double) auxlink.x; link1y=(double) auxlink.y; link1z=(double) auxlink.z; link1w=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x=(double) auxlink.x; link2y=(double) auxlink.y; link2z=(double) auxlink.z; link2w=(double) auxlink.w; // 2nd float auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(0+3*3)); link0x+=(double) auxlink.x; link0y+=(double) auxlink.y; link0z+=(double) auxlink.z; link0w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(1+3*3)); link1x+=(double) auxlink.x; link1y+=(double) auxlink.y; link1z+=(double) auxlink.z; link1w+=(double) auxlink.w; auxlink = tex1Dfetch(gauge_texRef, 12*size_dev + site_table[threadIdx.x] + gauge_offset + size_dev*(2+3*3)); link2x+=(double) auxlink.x; link2y+=(double) auxlink.y; link2z+=(double) auxlink.z; link2w+=(double) auxlink.w; #ifndef IM_CHEM_POT ferm_out[0][0][threadIdx.x] -= link0x*ferm_in_0.x+link0y*ferm_in_0.y + 
link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_out[0][1][threadIdx.x] -= link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_out[1][0][threadIdx.x] -= link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_out[1][1][threadIdx.x] -= link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_out[2][0][threadIdx.x] -= link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_out[2][1][threadIdx.x] -= link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); #else ferm_aux_0.x = link0x*ferm_in_0.x+link0y*ferm_in_0.y + link1z*ferm_in_1.x+link1w*ferm_in_1.y + stag_phase*(C1RED*ferm_in_2.x+ C1IMD*ferm_in_2.y); ferm_aux_0.y = link0x*ferm_in_0.y-link0y*ferm_in_0.x + link1z*ferm_in_1.y-link1w*ferm_in_1.x + stag_phase*(C1RED*ferm_in_2.y- C1IMD*ferm_in_2.x); ferm_aux_1.x = link0z*ferm_in_0.x+link0w*ferm_in_0.y + link2x*ferm_in_1.x+link2y*ferm_in_1.y + stag_phase*(C2RED*ferm_in_2.x+ C2IMD*ferm_in_2.y); ferm_aux_1.y = link0z*ferm_in_0.y-link0w*ferm_in_0.x + link2x*ferm_in_1.y-link2y*ferm_in_1.x + stag_phase*(C2RED*ferm_in_2.y- C2IMD*ferm_in_2.x); ferm_aux_2.x = link1x*ferm_in_0.x+link1y*ferm_in_0.y + link2z*ferm_in_1.x+link2w*ferm_in_1.y + stag_phase*(C3RED*ferm_in_2.x+ C3IMD*ferm_in_2.y); ferm_aux_2.y = link1x*ferm_in_0.y-link1y*ferm_in_0.x + link2z*ferm_in_1.y-link2w*ferm_in_1.x + stag_phase*(C3RED*ferm_in_2.y- C3IMD*ferm_in_2.x); ferm_out[0][0][threadIdx.x] -= ferm_aux_0.x*dev_eim_cos_d + ferm_aux_0.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_0] ferm_out[0][1][threadIdx.x] -= -ferm_aux_0.x*dev_eim_sin_d + ferm_aux_0.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_0] ferm_out[1][0][threadIdx.x] -= ferm_aux_1.x*dev_eim_cos_d + ferm_aux_1.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_1] ferm_out[1][1][threadIdx.x] -= -ferm_aux_1.x*dev_eim_sin_d + ferm_aux_1.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_1] ferm_out[2][0][threadIdx.x] -= ferm_aux_2.x*dev_eim_cos_d + ferm_aux_2.y*dev_eim_sin_d; // Re[e^{-imu}*ferm_aux_2] ferm_out[2][1][threadIdx.x] -= -ferm_aux_2.x*dev_eim_sin_d + ferm_aux_2.y*dev_eim_cos_d; // Im[e^{-imu}*ferm_aux_2] #endif //-------------------------------------------------end of second block // even ferm_in_0 = in[ idx]; ferm_in_1 = in[ size_dev + idx]; ferm_in_2 = in[ 2*size_dev + idx]; out[idx ].x = mass_d_dev*ferm_in_0.x - ferm_out[0][0][threadIdx.x]*(double)0.5; out[idx ].y = mass_d_dev*ferm_in_0.y - ferm_out[0][1][threadIdx.x]*(double)0.5; out[idx + size_dev ].x = mass_d_dev*ferm_in_1.x - ferm_out[1][0][threadIdx.x]*(double)0.5; out[idx + size_dev ].y = mass_d_dev*ferm_in_1.y - ferm_out[1][1][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].x = mass_d_dev*ferm_in_2.x - ferm_out[2][0][threadIdx.x]*(double)0.5; out[idx + 2*size_dev ].y = mass_d_dev*ferm_in_2.y - ferm_out[2][1][threadIdx.x]*(double)0.5; // odd out[idx + size_dev_h ].x = (double)0.0; out[idx + size_dev_h ].y = (double)0.0; out[idx + size_dev + size_dev_h ].x = (double)0.0; out[idx + size_dev + size_dev_h ].y = (double)0.0; out[idx + 2*size_dev + size_dev_h ].x = (double)0.0; out[idx + 2*size_dev + size_dev_h ].y = (double)0.0; //-------------------------------------------------end of DslashDagger } /* 
=================================================================
  EXTERNAL C FUNCTION
*/

void DslashOperatorDDEO(double2 *out,
                        double2 *in,
                        const int isign)
  {
#ifdef DEBUG_MODE_2
  printf("\033[32mDEBUG: inside DslashOperatorDDEO ...\033[0m\n");
#endif

  dim3 BlockDimension(NUM_THREADS);
  dim3 GridDimension(sizeh/BlockDimension.x); //Half sites

  size_t gauge_field_size = sizeof(float4)*size*12;
  size_t offset_g;

  cudaSafe(AT,cudaBindTexture(&offset_g, gauge_texRef, gauge_field_device, 2*gauge_field_size), "cudaBindTexture");
  offset_g/=sizeof(float4);

  if(isign == PLUS)
    {
    DslashDDKernelEO<<<GridDimension,BlockDimension>>>(out, in, device_table, device_phases, offset_g);
    cudaCheckError(AT,"DslashDDKernelEO");
    }

  if(isign == MINUS)
    {
    DslashDaggerDDKernelEO<<<GridDimension,BlockDimension>>>(out, in, device_table, device_phases, offset_g);
    cudaCheckError(AT,"DslashDaggerDDKernelEO");
    }

  cudaSafe(AT,cudaUnbindTexture(gauge_texRef), "cudaUnbindTexture");

#ifdef DEBUG_MODE_2
  printf("\033[32m\tterminated DslashOperatorDDEO \033[0m\n");
#endif
  }
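// For reference, the hipified twin of this file (the .hip entry of this pair, above)
// differs only in the host-side launcher; the device kernels DslashDDKernelEO and
// DslashDaggerDDKernelEO are carried over essentially unchanged. The translation maps:
//   cudaBindTexture / cudaUnbindTexture  ->  hipBindTexture / hipUnbindTexture
//   Kernel<<<Grid,Block>>>(args...)      ->  hipLaunchKernelGGL((Kernel), dim3(Grid), dim3(Block), 0, 0, args...)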
453c6d4238af903595eb6eed08bd5c527019bb75.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2019 ETH Zurich
// Copyright (C) 2019 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// Implementation of the G0 computation for time measurements.

#include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/kernels_interface.hpp"
#include "dca/util/cuda_blocks.hpp"
#include "dca/phys/dca_step/cluster_solver/shared_tools/solver_helper.cuh"

namespace dca {
namespace phys {
namespace solver {
namespace details {
// dca::phys::solver::details::

template <typename Real>
__global__ void computeG0Kernel(linalg::MatrixView<Real, linalg::GPU> mat,
                                const DeviceInterpolationData<Real> g0, const Real* t_l,
                                const int* b_l, const int* r_l, const Real* t_r, const int* b_r,
                                const int* r_r) {
  const unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
  const unsigned j = blockIdx.y * blockDim.y + threadIdx.y;

  if (i >= mat.nrRows() || j >= mat.nrCols())
    return;

  const auto index = solver_helper.index(b_l[i], b_r[j], r_l[i], r_r[j]);
  const Real tau = t_l[i] - t_r[j];

  mat(i, j) = g0(tau, index);
}

template <typename Real>
void computeG0(linalg::MatrixView<Real, linalg::GPU>& g0_mat, const DeviceInterpolationData<Real> g0,
               const Real* t_l, const int* b_l, const int* r_l, const Real* t_r, const int* b_r,
               const int* r_r, hipStream_t stream) {
  assert(SolverHelper::initialized());

  auto blocks = dca::util::get2DBlockSize(g0_mat.nrRows(), g0_mat.nrCols(), 32);

  hipLaunchKernelGGL(( computeG0Kernel), dim3(blocks[0]), dim3(blocks[1]), 0, stream, g0_mat, g0, t_l, b_l, r_l, t_r, b_r, r_r);
}

// Instantation.
template void computeG0<double>(linalg::MatrixView<double, linalg::GPU>&,
                                const DeviceInterpolationData<double>, const double*, const int*,
                                const int*, const double*, const int*, const int*, hipStream_t);
template void computeG0<float>(linalg::MatrixView<float, linalg::GPU>&,
                               const DeviceInterpolationData<float>, const float*, const int*,
                               const int*, const float*, const int*, const int*, hipStream_t);

}  // namespace details
}  // namespace solver
}  // namespace phys
}  // namespace dca
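// Reading aid (not part of the original source): computeG0Kernel uses one thread per
// matrix element, so thread (i, j) evaluates g0(t_l[i] - t_r[j], index(b_l[i], b_r[j],
// r_l[i], r_r[j])) and writes it to mat(i, j). get2DBlockSize(rows, cols, 32) presumably
// returns a {grid, block} pair that tiles an rows x cols matrix with blocks of edge at
// most 32 threads; the early return in the kernel discards the out-of-range threads.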
453c6d4238af903595eb6eed08bd5c527019bb75.cu
// Copyright (C) 2019 ETH Zurich // Copyright (C) 2019 UT-Battelle, LLC // All rights reserved. // // See LICENSE for terms of usage. // See CITATION.md for citation guidelines, if DCA++ is used for scientific publications. // // Author: Giovanni Balduzzi ([email protected]) // // Implementation of the G0 computation for time measurements. #include "dca/phys/dca_step/cluster_solver/shared_tools/accumulation/kernels_interface.hpp" #include "dca/util/cuda_blocks.hpp" #include "dca/phys/dca_step/cluster_solver/shared_tools/solver_helper.cuh" namespace dca { namespace phys { namespace solver { namespace details { // dca::phys::solver::details:: template <typename Real> __global__ void computeG0Kernel(linalg::MatrixView<Real, linalg::GPU> mat, const DeviceInterpolationData<Real> g0, const Real* t_l, const int* b_l, const int* r_l, const Real* t_r, const int* b_r, const int* r_r) { const unsigned i = blockIdx.x * blockDim.x + threadIdx.x; const unsigned j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= mat.nrRows() || j >= mat.nrCols()) return; const auto index = solver_helper.index(b_l[i], b_r[j], r_l[i], r_r[j]); const Real tau = t_l[i] - t_r[j]; mat(i, j) = g0(tau, index); } template <typename Real> void computeG0(linalg::MatrixView<Real, linalg::GPU>& g0_mat, const DeviceInterpolationData<Real> g0, const Real* t_l, const int* b_l, const int* r_l, const Real* t_r, const int* b_r, const int* r_r, cudaStream_t stream) { assert(SolverHelper::initialized()); auto blocks = dca::util::get2DBlockSize(g0_mat.nrRows(), g0_mat.nrCols(), 32); computeG0Kernel<<<blocks[0], blocks[1], 0, stream>>>(g0_mat, g0, t_l, b_l, r_l, t_r, b_r, r_r); } // Instantation. template void computeG0<double>(linalg::MatrixView<double, linalg::GPU>&, const DeviceInterpolationData<double>, const double*, const int*, const int*, const double*, const int*, const int*, cudaStream_t); template void computeG0<float>(linalg::MatrixView<float, linalg::GPU>&, const DeviceInterpolationData<float>, const float*, const int*, const int*, const float*, const int*, const int*, cudaStream_t); } // namespace details } // namespace solver } // namespace phys } // namespace dca
7b4bfc62f44fbff185789cfcc22ca2c36956cd7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_RandomForest_Constants.cu" #include "..\cuda_Common_Include.cu" #include "..\cuda_Common_RNG.cu" namespace ExtremeFindSplit{ __constant__ SharedBuffer cb_constants; // Host function for updates to constants __host__ void cuda_RandomForest_UpdateConstants(void* src){ hipMemcpyToSymbolAsync(cb_constants,src,sizeof(SharedBuffer)); } __device__ c_precision entropyConditionedOnRows(unsigned int *matrix); __device__ c_precision entropyOverColumns(unsigned int *matrix); __device__ void evaluateNumericAttribute(paramPack_Kernel* params); __device__ void evaluateNominalAttribute(paramPack_Kernel* params); __shared__ unsigned int s_attribute, s_nodeIndStart, s_numInds, s_treeId, s_attType; __shared__ unsigned int s_currDist[max_nominal*2]; __shared__ c_precision s_split; __global__ void kernel_entry(paramPack_Kernel params){ unsigned int nodeId = params.innerNodeIds[blockIdx.x + cb_constants.cb_currentDepth + (cb_constants.cb_nodeBufferEnd+1)*cb_constants.cb_nodeIdFlip]; // Block initialization if(threadIdx.x == 0){ if(cb_constants.cb_numFeatures == 0) params.splitVals[nodeId] = -FLT_MAX; s_treeId = params.treeIds[nodeId]; s_split = 0; s_nodeIndStart = params.nodeIndicesLimits[nodeId]; s_numInds = params.nodeIndicesLimits[(cb_constants.cb_nodeBufferEnd+1)+nodeId] - s_nodeIndStart + 1; stateRNG_xorShift128 state; state.x = params.rngStates[blockIdx.x]; state.y = params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)]; state.z = params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*2]; state.w = params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*3]; s_attribute = xorShift128(&state) % cb_constants.cb_attributeCount; s_attType = params.attributeTypes[s_attribute]; if(s_attType >= max_nominal){ unsigned int instanceInd; c_precision splitPoint = 0; for(unsigned int i=0; i<10; ++i){ instanceInd = params.nodeIndices[s_nodeIndStart + (xorShift128(&state) % s_numInds)]; splitPoint += params.dataset[cb_constants.cb_instanceCount*s_attribute + instanceInd]; } s_split = splitPoint/10; s_currDist[0] = s_currDist[1] = 0; s_currDist[2] = s_currDist[3] = 0; } params.rngStates[blockIdx.x] = state.x; params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)] = state.y; params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*2] = state.z; params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*3] = state.w; } __syncthreads(); if(s_attType >= max_nominal) evaluateNumericAttribute(&params); else evaluateNominalAttribute(&params); __syncthreads(); // Block global memory writes if(threadIdx.x == 0){ c_precision prior = entropyOverColumns(s_currDist); c_precision posterior = entropyConditionedOnRows(s_currDist); if(params.splitVals[nodeId] < prior-posterior){ // Save splitpoint, attribute and distribution params.splitVals[nodeId] = prior-posterior; params.splitPoints[nodeId] = s_split; params.attributes[nodeId] = s_attribute; } } } __device__ void evaluateNumericAttribute(paramPack_Kernel* params){ unsigned int window = 0; unsigned int numInds = s_numInds; unsigned int nodeIndStart = s_nodeIndStart; unsigned int attribute = s_attribute; unsigned int treeId = s_treeId; unsigned int weight; unsigned int inst; c_precision val; while(threadIdx.x + window < numInds){ inst = params->nodeIndices[nodeIndStart + threadIdx.x + window]; val = params->dataset[cb_constants.cb_instanceCount * attribute + inst]; weight = params->bagWeights[treeId*cb_constants.cb_instanceCount + 
inst]; if(val != -FLT_MAX) atomicAdd(&s_currDist[2*((val < s_split) ? 0 : 1)+params->classValues[inst]],weight); else atomicAdd(&s_currDist[params->classValues[inst]],weight); window += thread_group_size; } } __device__ void evaluateNominalAttribute(paramPack_Kernel* params){ unsigned int window = 0; unsigned int numInds = s_numInds; unsigned int nodeIndStart = s_nodeIndStart; unsigned int attribute = s_attribute; unsigned int treeId = s_treeId; unsigned int weight; unsigned int inst; c_precision val; if(threadIdx.x < 40){ s_currDist[threadIdx.x] = 0.0; } __syncthreads(); // Split on median value while(threadIdx.x + window < numInds){ inst = params->nodeIndices[nodeIndStart + threadIdx.x + window]; val = params->dataset[cb_constants.cb_instanceCount * attribute + inst]; weight = params->bagWeights[treeId*cb_constants.cb_instanceCount + inst]; if(val != -FLT_MAX) atomicAdd(&s_currDist[2*int(val)+params->classValues[inst]],weight); else atomicAdd(&s_currDist[params->classValues[inst]],weight); window += thread_group_size; } } __device__ c_precision lnFunc(c_precision num){ if(num <= 1e-6){ return 0; } else{ return num * log(num); } } __device__ c_precision entropyConditionedOnRows(unsigned int *matrix){ unsigned int nodes = (s_attType >= max_nominal) ? 2 : s_attType; c_precision returnValue = 0, sumForRow, total = 0; for (int i = 0; i < nodes; i++) { sumForRow = 0; for (int j = 0; j < 2; j++) { returnValue = returnValue + lnFunc(matrix[2*i+j]); sumForRow += matrix[2*i+j]; } returnValue = returnValue - lnFunc(sumForRow); total += sumForRow; } if(total < 1.0e-6) { return 0; } return -returnValue / (total * log(c_precision(2.0))); } __device__ c_precision entropyOverColumns(unsigned int *matrix){ unsigned int nodes = (s_attType >= max_nominal) ? 2 : s_attType; c_precision returnValue = 0, sumForColumn, total = 0; for (int j = 0; j < 2; j++){ sumForColumn = 0; for(int i = 0; i < nodes; i++){ sumForColumn += matrix[2*i+j]; } returnValue = returnValue - lnFunc(sumForColumn); total += sumForColumn; } if(total < 1.0e-6){ return 0; } return (returnValue + lnFunc(total)) / (total * log(c_precision(2.0))); } }
7b4bfc62f44fbff185789cfcc22ca2c36956cd7e.cu
#include "cuda_RandomForest_Constants.cu" #include "..\cuda_Common_Include.cu" #include "..\cuda_Common_RNG.cu" namespace ExtremeFindSplit{ __constant__ SharedBuffer cb_constants; // Host function for updates to constants __host__ void cuda_RandomForest_UpdateConstants(void* src){ cudaMemcpyToSymbolAsync(cb_constants,src,sizeof(SharedBuffer)); } __device__ c_precision entropyConditionedOnRows(unsigned int *matrix); __device__ c_precision entropyOverColumns(unsigned int *matrix); __device__ void evaluateNumericAttribute(paramPack_Kernel* params); __device__ void evaluateNominalAttribute(paramPack_Kernel* params); __shared__ unsigned int s_attribute, s_nodeIndStart, s_numInds, s_treeId, s_attType; __shared__ unsigned int s_currDist[max_nominal*2]; __shared__ c_precision s_split; __global__ void kernel_entry(paramPack_Kernel params){ unsigned int nodeId = params.innerNodeIds[blockIdx.x + cb_constants.cb_currentDepth + (cb_constants.cb_nodeBufferEnd+1)*cb_constants.cb_nodeIdFlip]; // Block initialization if(threadIdx.x == 0){ if(cb_constants.cb_numFeatures == 0) params.splitVals[nodeId] = -FLT_MAX; s_treeId = params.treeIds[nodeId]; s_split = 0; s_nodeIndStart = params.nodeIndicesLimits[nodeId]; s_numInds = params.nodeIndicesLimits[(cb_constants.cb_nodeBufferEnd+1)+nodeId] - s_nodeIndStart + 1; stateRNG_xorShift128 state; state.x = params.rngStates[blockIdx.x]; state.y = params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)]; state.z = params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*2]; state.w = params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*3]; s_attribute = xorShift128(&state) % cb_constants.cb_attributeCount; s_attType = params.attributeTypes[s_attribute]; if(s_attType >= max_nominal){ unsigned int instanceInd; c_precision splitPoint = 0; for(unsigned int i=0; i<10; ++i){ instanceInd = params.nodeIndices[s_nodeIndStart + (xorShift128(&state) % s_numInds)]; splitPoint += params.dataset[cb_constants.cb_instanceCount*s_attribute + instanceInd]; } s_split = splitPoint/10; s_currDist[0] = s_currDist[1] = 0; s_currDist[2] = s_currDist[3] = 0; } params.rngStates[blockIdx.x] = state.x; params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)] = state.y; params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*2] = state.z; params.rngStates[blockIdx.x + (cb_constants.cb_nodeBufferEnd+1)*3] = state.w; } __syncthreads(); if(s_attType >= max_nominal) evaluateNumericAttribute(&params); else evaluateNominalAttribute(&params); __syncthreads(); // Block global memory writes if(threadIdx.x == 0){ c_precision prior = entropyOverColumns(s_currDist); c_precision posterior = entropyConditionedOnRows(s_currDist); if(params.splitVals[nodeId] < prior-posterior){ // Save splitpoint, attribute and distribution params.splitVals[nodeId] = prior-posterior; params.splitPoints[nodeId] = s_split; params.attributes[nodeId] = s_attribute; } } } __device__ void evaluateNumericAttribute(paramPack_Kernel* params){ unsigned int window = 0; unsigned int numInds = s_numInds; unsigned int nodeIndStart = s_nodeIndStart; unsigned int attribute = s_attribute; unsigned int treeId = s_treeId; unsigned int weight; unsigned int inst; c_precision val; while(threadIdx.x + window < numInds){ inst = params->nodeIndices[nodeIndStart + threadIdx.x + window]; val = params->dataset[cb_constants.cb_instanceCount * attribute + inst]; weight = params->bagWeights[treeId*cb_constants.cb_instanceCount + inst]; if(val != -FLT_MAX) atomicAdd(&s_currDist[2*((val < s_split) ? 
0 : 1)+params->classValues[inst]],weight); else atomicAdd(&s_currDist[params->classValues[inst]],weight); window += thread_group_size; } } __device__ void evaluateNominalAttribute(paramPack_Kernel* params){ unsigned int window = 0; unsigned int numInds = s_numInds; unsigned int nodeIndStart = s_nodeIndStart; unsigned int attribute = s_attribute; unsigned int treeId = s_treeId; unsigned int weight; unsigned int inst; c_precision val; if(threadIdx.x < 40){ s_currDist[threadIdx.x] = 0.0; } __syncthreads(); // Split on median value while(threadIdx.x + window < numInds){ inst = params->nodeIndices[nodeIndStart + threadIdx.x + window]; val = params->dataset[cb_constants.cb_instanceCount * attribute + inst]; weight = params->bagWeights[treeId*cb_constants.cb_instanceCount + inst]; if(val != -FLT_MAX) atomicAdd(&s_currDist[2*int(val)+params->classValues[inst]],weight); else atomicAdd(&s_currDist[params->classValues[inst]],weight); window += thread_group_size; } } __device__ c_precision lnFunc(c_precision num){ if(num <= 1e-6){ return 0; } else{ return num * log(num); } } __device__ c_precision entropyConditionedOnRows(unsigned int *matrix){ unsigned int nodes = (s_attType >= max_nominal) ? 2 : s_attType; c_precision returnValue = 0, sumForRow, total = 0; for (int i = 0; i < nodes; i++) { sumForRow = 0; for (int j = 0; j < 2; j++) { returnValue = returnValue + lnFunc(matrix[2*i+j]); sumForRow += matrix[2*i+j]; } returnValue = returnValue - lnFunc(sumForRow); total += sumForRow; } if(total < 1.0e-6) { return 0; } return -returnValue / (total * log(c_precision(2.0))); } __device__ c_precision entropyOverColumns(unsigned int *matrix){ unsigned int nodes = (s_attType >= max_nominal) ? 2 : s_attType; c_precision returnValue = 0, sumForColumn, total = 0; for (int j = 0; j < 2; j++){ sumForColumn = 0; for(int i = 0; i < nodes; i++){ sumForColumn += matrix[2*i+j]; } returnValue = returnValue - lnFunc(sumForColumn); total += sumForColumn; } if(total < 1.0e-6){ return 0; } return (returnValue + lnFunc(total)) / (total * log(c_precision(2.0))); } }
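The split quality kept by kernel_entry above is the information gain prior - posterior, where prior = entropyOverColumns(s_currDist) and posterior = entropyConditionedOnRows(s_currDist). Below is a minimal host-side sketch of that computation for the numeric-attribute case (2 branches x 2 classes); the distribution counts are invented for illustration and the helpers simply mirror the device functions above.

#include <cmath>
#include <cstdio>

static double lnFunc(double num) { return (num <= 1e-6) ? 0.0 : num * std::log(num); }

static double entropyOverColumns(const unsigned int* m) {
    double ret = 0, total = 0;
    for (int j = 0; j < 2; ++j) {
        double colSum = double(m[j]) + double(m[2 + j]);   // sum over the two branches
        ret -= lnFunc(colSum);
        total += colSum;
    }
    return (total < 1e-6) ? 0.0 : (ret + lnFunc(total)) / (total * std::log(2.0));
}

static double entropyConditionedOnRows(const unsigned int* m) {
    double ret = 0, total = 0;
    for (int i = 0; i < 2; ++i) {
        double rowSum = 0;
        for (int j = 0; j < 2; ++j) { ret += lnFunc(m[2 * i + j]); rowSum += m[2 * i + j]; }
        ret -= lnFunc(rowSum);
        total += rowSum;
    }
    return (total < 1e-6) ? 0.0 : -ret / (total * std::log(2.0));
}

int main() {
    // dist[2*branch + class]: weighted class counts landing left/right of the candidate split.
    unsigned int dist[4] = {30, 5, 10, 25};
    double gain = entropyOverColumns(dist) - entropyConditionedOnRows(dist);
    printf("information gain for this candidate split: %f\n", gain);   // larger is better
    return 0;
}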
225675cee1505a9e79e9c60b0cce48521ad8c083.hip
// !!! This is a file automatically generated by hipify!!! // *********************************************************************** // // Demo program for education in subject // Computer Architectures and Parallel Systems. // Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava // email:[email protected] // // Example of CUDA Technology Usage with unified memory. // // Image transformation from RGB to BW schema. // // *********************************************************************** #include <stdio.h> #include <cuda_device_runtime_api.h> #include <hip/hip_runtime.h> #include "pic_type.h" // Demo kernel to transform RGB color schema to BW schema __global__ void kernel_grayscale( CUDA_Pic colorPic, CUDA_Pic bwPic ) { // X,Y coordinates and check image dimensions int y = blockDim.y * blockIdx.y + threadIdx.y; int x = blockDim.x * blockIdx.x + threadIdx.x; if ( y >= colorPic.Size.y ) return; if ( x >= colorPic.Size.x ) return; // Get point from color picture uchar3 bgr = colorPic.P_uchar3[ y * colorPic.Size.x + x ]; // Store BW point to new image bwPic.P_uchar1[ y * bwPic.Size.x + x ].x = bgr.x * 0.11 + bgr.y * 0.59 + bgr.z * 0.30; } void cu_run_grayscale( CUDA_Pic colorPic, CUDA_Pic bwPic ) { hipError_t cerr; // Grid creation, size of grid must be equal or greater than images int block_size = 16; dim3 blocks( ( colorPic.Size.x + block_size - 1 ) / block_size, ( colorPic.Size.y + block_size - 1 ) / block_size ); dim3 threads( block_size, block_size ); hipLaunchKernelGGL(( kernel_grayscale), dim3(blocks), dim3(threads) , 0, 0, colorPic, bwPic ); if ( ( cerr = hipGetLastError() ) != hipSuccess ) printf( "CUDA Error [%d] - '%s'\n", __LINE__, hipGetErrorString( cerr ) ); hipDeviceSynchronize(); }
225675cee1505a9e79e9c60b0cce48521ad8c083.cu
// *********************************************************************** // // Demo program for education in subject // Computer Architectures and Parallel Systems. // Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava // email:[email protected] // // Example of CUDA Technology Usage with unified memory. // // Image transformation from RGB to BW schema. // // *********************************************************************** #include <stdio.h> #include <cuda_device_runtime_api.h> #include <cuda_runtime.h> #include "pic_type.h" // Demo kernel to transform RGB color schema to BW schema __global__ void kernel_grayscale( CUDA_Pic colorPic, CUDA_Pic bwPic ) { // X,Y coordinates and check image dimensions int y = blockDim.y * blockIdx.y + threadIdx.y; int x = blockDim.x * blockIdx.x + threadIdx.x; if ( y >= colorPic.Size.y ) return; if ( x >= colorPic.Size.x ) return; // Get point from color picture uchar3 bgr = colorPic.P_uchar3[ y * colorPic.Size.x + x ]; // Store BW point to new image bwPic.P_uchar1[ y * bwPic.Size.x + x ].x = bgr.x * 0.11 + bgr.y * 0.59 + bgr.z * 0.30; } void cu_run_grayscale( CUDA_Pic colorPic, CUDA_Pic bwPic ) { cudaError_t cerr; // Grid creation, size of grid must be equal or greater than images int block_size = 16; dim3 blocks( ( colorPic.Size.x + block_size - 1 ) / block_size, ( colorPic.Size.y + block_size - 1 ) / block_size ); dim3 threads( block_size, block_size ); kernel_grayscale<<< blocks, threads >>>( colorPic, bwPic ); if ( ( cerr = cudaGetLastError() ) != cudaSuccess ) printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) ); cudaDeviceSynchronize(); }
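A self-contained sketch of how kernel_grayscale above might be driven with unified memory. The CUDA_Pic struct below is a hypothetical stand-in for the one declared in pic_type.h (not shown here) and only carries the members the kernel actually touches; the image size and pixel values are likewise assumptions.

#include <cuda_runtime.h>
#include <cstdio>

struct CUDA_Pic {            // hypothetical stand-in for the real pic_type.h definition
    int2 Size;
    uchar3* P_uchar3;        // BGR input pixels
    uchar1* P_uchar1;        // grayscale output pixels
};

__global__ void kernel_grayscale(CUDA_Pic colorPic, CUDA_Pic bwPic)
{
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    if (y >= colorPic.Size.y || x >= colorPic.Size.x) return;
    uchar3 bgr = colorPic.P_uchar3[y * colorPic.Size.x + x];
    bwPic.P_uchar1[y * bwPic.Size.x + x].x = bgr.x * 0.11 + bgr.y * 0.59 + bgr.z * 0.30;
}

int main()
{
    CUDA_Pic color, bw;
    color.Size = bw.Size = make_int2(640, 480);                 // assumed image size
    size_t n = color.Size.x * color.Size.y;
    // Unified memory so both host and device can touch the pixels directly.
    cudaMallocManaged(&color.P_uchar3, n * sizeof(uchar3));
    cudaMallocManaged(&bw.P_uchar1, n * sizeof(uchar1));
    for (size_t i = 0; i < n; ++i) color.P_uchar3[i] = make_uchar3(10, 100, 200);

    int bs = 16;
    dim3 blocks((color.Size.x + bs - 1) / bs, (color.Size.y + bs - 1) / bs);
    kernel_grayscale<<<blocks, dim3(bs, bs)>>>(color, bw);
    cudaDeviceSynchronize();
    printf("bw[0] = %d\n", bw.P_uchar1[0].x);                   // expect 120 for this BGR value

    cudaFree(color.P_uchar3);
    cudaFree(bw.P_uchar1);
    return 0;
}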
051e795268b3d5971efd359b594f74948c48e596.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/graph_send_recv_grad_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" namespace phi { template <typename Context, typename T, typename IndexT> void GraphSendRecvGradOpCUDAKernelLaunchHelper( const Context& ctx, const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& reduce_op, DenseTensor* x_grad, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { const int& index_size = dst_index.dims()[0]; ctx.template Alloc<T>(x_grad); T* p_output = x_grad->data<T>(); const auto& src_dims = x.dims(); int64_t memset_size = 1; for (int i = 0; i < src_dims.size(); ++i) { memset_size *= src_dims[i]; } const size_t& memset_bytes = memset_size * sizeof(T); #ifdef PADDLE_WITH_HIP hipMemset(p_output, 0, memset_bytes); #else hipMemset(p_output, 0, memset_bytes); #endif if (index_size == 0) return; int64_t slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) { slice_size *= src_dims[i]; } const T* p_src = out_grad.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int64_t n = slice_size * index_size; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_tmp = (n + block - 1) / block; int64_t grid = grid_tmp < max_grid_dimx ? 
grid_tmp : max_grid_dimx; int64_t input_size = src_dims[0]; if (reduce_op == "SUM") { GraphSendRecvSumCUDAFunctor<T, IndexT> functor; hipLaunchKernelGGL(( GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>>) , dim3(grid), dim3(block), 0, ctx.stream(), p_src, d_index, s_index, p_output, index_size, slice_size, functor); } else if (reduce_op == "MEAN") { const int32_t* s_count = dst_count->data<int32_t>(); hipLaunchKernelGGL(( ManipulateMeanGradCUDAKernel<T, IndexT>), dim3(grid), dim3(block), 0, ctx.stream(), p_src, d_index, s_index, p_output, index_size, slice_size, s_count); } else if (reduce_op == "MAX" || reduce_op == "MIN") { const T* ptr_input = x.data<T>(); const T* ptr_output = out->data<T>(); hipLaunchKernelGGL(( ManipulateMinMaxGradCUDAKernel<T, IndexT>) , dim3(grid), dim3(block), 0, ctx.stream(), p_src, d_index, s_index, p_output, index_size, slice_size, ptr_input, ptr_output); } } template <typename T, typename Context> void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const paddle::optional<DenseTensor>& out, const paddle::optional<DenseTensor>& dst_count, const DenseTensor& out_grad, const std::string& reduce_op, DenseTensor* x_grad) { auto index_type = src_index.dtype(); if (index_type == phi::DataType::INT32) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, out_grad, x, src_index, dst_index, reduce_op, x_grad, dst_count.get_ptr(), out.get_ptr()); } else if (index_type == phi::DataType::INT64) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, out_grad, x, src_index, dst_index, reduce_op, x_grad, dst_count.get_ptr(), out.get_ptr()); } } } // namespace phi PD_REGISTER_KERNEL(graph_send_recv_grad, GPU, ALL_LAYOUT, phi::GraphSendRecvGradKernel, float, double, int, int64_t, phi::dtype::float16) {}
051e795268b3d5971efd359b594f74948c48e596.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/graph_send_recv_grad_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" namespace phi { template <typename Context, typename T, typename IndexT> void GraphSendRecvGradOpCUDAKernelLaunchHelper( const Context& ctx, const DenseTensor& out_grad, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& reduce_op, DenseTensor* x_grad, const DenseTensor* dst_count = nullptr, const DenseTensor* out = nullptr) { const int& index_size = dst_index.dims()[0]; ctx.template Alloc<T>(x_grad); T* p_output = x_grad->data<T>(); const auto& src_dims = x.dims(); int64_t memset_size = 1; for (int i = 0; i < src_dims.size(); ++i) { memset_size *= src_dims[i]; } const size_t& memset_bytes = memset_size * sizeof(T); #ifdef PADDLE_WITH_HIP hipMemset(p_output, 0, memset_bytes); #else cudaMemset(p_output, 0, memset_bytes); #endif if (index_size == 0) return; int64_t slice_size = 1; for (int i = 1; i < src_dims.size(); ++i) { slice_size *= src_dims[i]; } const T* p_src = out_grad.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int64_t n = slice_size * index_size; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_tmp = (n + block - 1) / block; int64_t grid = grid_tmp < max_grid_dimx ? 
grid_tmp : max_grid_dimx; int64_t input_size = src_dims[0]; if (reduce_op == "SUM") { GraphSendRecvSumCUDAFunctor<T, IndexT> functor; GraphSendRecvCUDAKernel<T, IndexT, GraphSendRecvSumCUDAFunctor<T, IndexT>> <<<grid, block, 0, ctx.stream()>>>( p_src, d_index, s_index, p_output, index_size, slice_size, functor); } else if (reduce_op == "MEAN") { const int32_t* s_count = dst_count->data<int32_t>(); ManipulateMeanGradCUDAKernel<T, IndexT><<<grid, block, 0, ctx.stream()>>>( p_src, d_index, s_index, p_output, index_size, slice_size, s_count); } else if (reduce_op == "MAX" || reduce_op == "MIN") { const T* ptr_input = x.data<T>(); const T* ptr_output = out->data<T>(); ManipulateMinMaxGradCUDAKernel<T, IndexT> <<<grid, block, 0, ctx.stream()>>>(p_src, d_index, s_index, p_output, index_size, slice_size, ptr_input, ptr_output); } } template <typename T, typename Context> void GraphSendRecvGradKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& src_index, const DenseTensor& dst_index, const paddle::optional<DenseTensor>& out, const paddle::optional<DenseTensor>& dst_count, const DenseTensor& out_grad, const std::string& reduce_op, DenseTensor* x_grad) { auto index_type = src_index.dtype(); if (index_type == phi::DataType::INT32) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, out_grad, x, src_index, dst_index, reduce_op, x_grad, dst_count.get_ptr(), out.get_ptr()); } else if (index_type == phi::DataType::INT64) { GraphSendRecvGradOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, out_grad, x, src_index, dst_index, reduce_op, x_grad, dst_count.get_ptr(), out.get_ptr()); } } } // namespace phi PD_REGISTER_KERNEL(graph_send_recv_grad, GPU, ALL_LAYOUT, phi::GraphSendRecvGradKernel, float, double, int, int64_t, phi::dtype::float16) {}
89a4647e2eea948b2c95c6978e5bac5c2fc0c2b8.hip
// !!! This is a file automatically generated by hipify!!! #include "Header.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <iostream> #define MASK_LENGTH 7 __constant__ int mask[MASK_LENGTH]; __global__ void ConstantMem(int* a, int* result, const int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int r = MASK_LENGTH / 2; int start = tid - r; int temp = 0; for (int i = 0; i < MASK_LENGTH; i++) { if (((start + i) >= 0) && ((start + i) < N)) { temp += a[start + i] * mask[i]; } } result[tid] = temp; }
89a4647e2eea948b2c95c6978e5bac5c2fc0c2b8.cu
#include "Header.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <iostream> #define MASK_LENGTH 7 __constant__ int mask[MASK_LENGTH]; __global__ void ConstantMem(int* a, int* result, const int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int r = MASK_LENGTH / 2; int start = tid - r; int temp = 0; for (int i = 0; i < MASK_LENGTH; i++) { if (((start + i) >= 0) && ((start + i) < N)) { temp += a[start + i] * mask[i]; } } result[tid] = temp; }
346c787df4b71791dbc73ff0ca867567515383ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "morphology.h" enum class MorphOpType { ERODE, DILATE, }; template <MorphOpType opType> inline __device__ unsigned char elementOp(unsigned char lhs, unsigned char rhs) { } template <> inline __device__ unsigned char elementOp<MorphOpType::ERODE>(unsigned char lhs, unsigned char rhs) { return min(lhs, rhs); } template <> inline __device__ unsigned char elementOp<MorphOpType::DILATE>(unsigned char lhs, unsigned char rhs) { return max(lhs, rhs); } template <MorphOpType opType> inline __device__ unsigned char borderValue() { } template <> inline __device__ unsigned char borderValue<MorphOpType::ERODE>() { return BLACK; } template <> inline __device__ unsigned char borderValue<MorphOpType::DILATE>() { return WHITE; } // NOTE: step-efficient parallel scan template <MorphOpType opType> __device__ void reversedScan( const unsigned char* __restrict__ buffer, unsigned char* __restrict__ opArray, const int selSize, const int tid) { opArray[tid] = buffer[tid]; __syncthreads(); for (int offset = 1; offset < selSize; offset *= 2) { if (tid <= selSize - 1 - offset) { opArray[tid] = elementOp<opType>(opArray[tid], opArray[tid + offset]); } __syncthreads(); } } // NOTE: step-efficient parallel scan template <MorphOpType opType> __device__ void scan( const unsigned char* __restrict__ buffer, unsigned char* __restrict__ opArray, const int selSize, const int tid) { opArray[tid] = buffer[tid]; __syncthreads(); for (int offset = 1; offset < selSize; offset *= 2) { if (tid >= offset) { opArray[tid] = elementOp<opType>(opArray[tid], opArray[tid - offset]); } __syncthreads(); } } // NOTE: step-efficient parallel scan template <MorphOpType opType> __device__ void twoWayScan( const unsigned char* __restrict__ buffer, unsigned char* __restrict__ opArray, const int selSize, const int tid) { opArray[tid] = buffer[tid]; opArray[tid + selSize] = buffer[tid + selSize]; __syncthreads(); for (int offset = 1; offset < selSize; offset *= 2) { if (tid >= offset) { opArray[tid + selSize - 1] = elementOp<opType>(opArray[tid + selSize - 1], opArray[tid + selSize - 1 - offset]); } if (tid <= selSize - 1 - offset) { opArray[tid] = elementOp<opType>(opArray[tid], opArray[tid + offset]); } __syncthreads(); } } template <MorphOpType opType> __global__ void vhgw_horiz( unsigned char* __restrict__ dst, const unsigned char* __restrict__ src, const int width, const int height, const int selSize ) { extern __shared__ unsigned char sMem[]; unsigned char* buffer = sMem; unsigned char* opArray = buffer + 2 * selSize; const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if (tidx >= width || tidy >= height) return; buffer[threadIdx.x] = src[tidy * width + tidx]; if (tidx + selSize < width) { buffer[threadIdx.x + selSize] = src[tidy * width + tidx + selSize]; } __syncthreads(); twoWayScan<opType>(buffer, opArray, selSize, threadIdx.x); if (tidx + selSize/2 < width - selSize/2) { dst[tidy * width + tidx + selSize/2] = elementOp<opType>(opArray[threadIdx.x], opArray[threadIdx.x + selSize - 1]); } } template <MorphOpType opType> __global__ void vhgw_vert( unsigned char* __restrict__ dst, const unsigned char* __restrict__ src, const int width, const int height, const int selSize) { extern __shared__ unsigned char sMem[]; unsigned char* buffer = sMem; unsigned char* opArray = buffer + 2 * selSize; const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * 
blockDim.y; if (tidy >= height || tidx >= width) { return; } buffer[threadIdx.y] = src[tidy * width + tidx]; if (tidy + selSize < height) { buffer[threadIdx.y + selSize] = src[(tidy + selSize) * width + tidx]; } __syncthreads(); twoWayScan<opType>(buffer, opArray, selSize, threadIdx.y); if (tidy + selSize/2 < height - selSize/2) { dst[(tidy + selSize/2) * width + tidx] = elementOp<opType>(opArray[threadIdx.y], opArray[threadIdx.y + selSize - 1]); } if (tidy < selSize/2 || tidy >= height - selSize/2) { dst[tidy * width + tidx] = borderValue<opType>(); } } template <MorphOpType opType> double morphology( unsigned char* img_d, unsigned char* tmp_d, const int width, const int height, const int hsize, const int vsize) { unsigned int memSize = width * height * sizeof(unsigned char); dim3 blockSize_h; dim3 gridSize_h; dim3 blockSize_v; dim3 gridSize_v; hipMemset(tmp_d, 0, memSize); blockSize_h.x = hsize; blockSize_h.y = 1; gridSize_h.x = roundUp(width, blockSize_h.x); gridSize_h.y = roundUp(height, blockSize_h.y); size_t sMemSize_h = 4 * hsize * sizeof(unsigned char); blockSize_v.x = 1; blockSize_v.y = vsize; gridSize_v.x = roundUp(width, blockSize_v.x); gridSize_v.y = roundUp(height, blockSize_v.y); size_t sMemSize_v = 4 * vsize * sizeof(unsigned char); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(HIP_KERNEL_NAME(vhgw_horiz<opType>), gridSize_h, blockSize_h, sMemSize_h, 0, tmp_d, img_d, width, height, hsize); hipLaunchKernelGGL(HIP_KERNEL_NAME(vhgw_vert<opType>), gridSize_v, blockSize_v, sMemSize_v, 0, img_d, tmp_d, width, height, vsize); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); return time; } extern "C" double erode(unsigned char* img_d, unsigned char* tmp_d, const int width, const int height, const int hsize, const int vsize) { return morphology<MorphOpType::ERODE>(img_d, tmp_d, width, height, hsize, vsize); } extern "C" double dilate(unsigned char* img_d, unsigned char* tmp_d, const int width, const int height, const int hsize, const int vsize) { return morphology<MorphOpType::DILATE>(img_d, tmp_d, width, height, hsize, vsize); }
346c787df4b71791dbc73ff0ca867567515383ba.cu
#include "morphology.h" enum class MorphOpType { ERODE, DILATE, }; template <MorphOpType opType> inline __device__ unsigned char elementOp(unsigned char lhs, unsigned char rhs) { } template <> inline __device__ unsigned char elementOp<MorphOpType::ERODE>(unsigned char lhs, unsigned char rhs) { return min(lhs, rhs); } template <> inline __device__ unsigned char elementOp<MorphOpType::DILATE>(unsigned char lhs, unsigned char rhs) { return max(lhs, rhs); } template <MorphOpType opType> inline __device__ unsigned char borderValue() { } template <> inline __device__ unsigned char borderValue<MorphOpType::ERODE>() { return BLACK; } template <> inline __device__ unsigned char borderValue<MorphOpType::DILATE>() { return WHITE; } // NOTE: step-efficient parallel scan template <MorphOpType opType> __device__ void reversedScan( const unsigned char* __restrict__ buffer, unsigned char* __restrict__ opArray, const int selSize, const int tid) { opArray[tid] = buffer[tid]; __syncthreads(); for (int offset = 1; offset < selSize; offset *= 2) { if (tid <= selSize - 1 - offset) { opArray[tid] = elementOp<opType>(opArray[tid], opArray[tid + offset]); } __syncthreads(); } } // NOTE: step-efficient parallel scan template <MorphOpType opType> __device__ void scan( const unsigned char* __restrict__ buffer, unsigned char* __restrict__ opArray, const int selSize, const int tid) { opArray[tid] = buffer[tid]; __syncthreads(); for (int offset = 1; offset < selSize; offset *= 2) { if (tid >= offset) { opArray[tid] = elementOp<opType>(opArray[tid], opArray[tid - offset]); } __syncthreads(); } } // NOTE: step-efficient parallel scan template <MorphOpType opType> __device__ void twoWayScan( const unsigned char* __restrict__ buffer, unsigned char* __restrict__ opArray, const int selSize, const int tid) { opArray[tid] = buffer[tid]; opArray[tid + selSize] = buffer[tid + selSize]; __syncthreads(); for (int offset = 1; offset < selSize; offset *= 2) { if (tid >= offset) { opArray[tid + selSize - 1] = elementOp<opType>(opArray[tid + selSize - 1], opArray[tid + selSize - 1 - offset]); } if (tid <= selSize - 1 - offset) { opArray[tid] = elementOp<opType>(opArray[tid], opArray[tid + offset]); } __syncthreads(); } } template <MorphOpType opType> __global__ void vhgw_horiz( unsigned char* __restrict__ dst, const unsigned char* __restrict__ src, const int width, const int height, const int selSize ) { extern __shared__ unsigned char sMem[]; unsigned char* buffer = sMem; unsigned char* opArray = buffer + 2 * selSize; const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if (tidx >= width || tidy >= height) return; buffer[threadIdx.x] = src[tidy * width + tidx]; if (tidx + selSize < width) { buffer[threadIdx.x + selSize] = src[tidy * width + tidx + selSize]; } __syncthreads(); twoWayScan<opType>(buffer, opArray, selSize, threadIdx.x); if (tidx + selSize/2 < width - selSize/2) { dst[tidy * width + tidx + selSize/2] = elementOp<opType>(opArray[threadIdx.x], opArray[threadIdx.x + selSize - 1]); } } template <MorphOpType opType> __global__ void vhgw_vert( unsigned char* __restrict__ dst, const unsigned char* __restrict__ src, const int width, const int height, const int selSize) { extern __shared__ unsigned char sMem[]; unsigned char* buffer = sMem; unsigned char* opArray = buffer + 2 * selSize; const int tidx = threadIdx.x + blockIdx.x * blockDim.x; const int tidy = threadIdx.y + blockIdx.y * blockDim.y; if (tidy >= height || tidx >= width) { return; } buffer[threadIdx.y] = 
src[tidy * width + tidx]; if (tidy + selSize < height) { buffer[threadIdx.y + selSize] = src[(tidy + selSize) * width + tidx]; } __syncthreads(); twoWayScan<opType>(buffer, opArray, selSize, threadIdx.y); if (tidy + selSize/2 < height - selSize/2) { dst[(tidy + selSize/2) * width + tidx] = elementOp<opType>(opArray[threadIdx.y], opArray[threadIdx.y + selSize - 1]); } if (tidy < selSize/2 || tidy >= height - selSize/2) { dst[tidy * width + tidx] = borderValue<opType>(); } } template <MorphOpType opType> double morphology( unsigned char* img_d, unsigned char* tmp_d, const int width, const int height, const int hsize, const int vsize) { unsigned int memSize = width * height * sizeof(unsigned char); dim3 blockSize_h; dim3 gridSize_h; dim3 blockSize_v; dim3 gridSize_v; hipMemset(tmp_d, 0, memSize); blockSize_h.x = hsize; blockSize_h.y = 1; gridSize_h.x = roundUp(width, blockSize_h.x); gridSize_h.y = roundUp(height, blockSize_h.y); size_t sMemSize_h = 4 * hsize * sizeof(unsigned char); blockSize_v.x = 1; blockSize_v.y = vsize; gridSize_v.x = roundUp(width, blockSize_v.x); gridSize_v.y = roundUp(height, blockSize_v.y); size_t sMemSize_v = 4 * vsize * sizeof(unsigned char); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(HIP_KERNEL_NAME(vhgw_horiz<opType>), gridSize_h, blockSize_h, sMemSize_h, 0, tmp_d, img_d, width, height, hsize); hipLaunchKernelGGL(HIP_KERNEL_NAME(vhgw_vert<opType>), gridSize_v, blockSize_v, sMemSize_v, 0, img_d, tmp_d, width, height, vsize); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); return time; } extern "C" double erode(unsigned char* img_d, unsigned char* tmp_d, const int width, const int height, const int hsize, const int vsize) { return morphology<MorphOpType::ERODE>(img_d, tmp_d, width, height, hsize, vsize); } extern "C" double dilate(unsigned char* img_d, unsigned char* tmp_d, const int width, const int height, const int hsize, const int vsize) { return morphology<MorphOpType::DILATE>(img_d, tmp_d, width, height, hsize, vsize); }
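A host-side usage sketch for the exported erode()/dilate() wrappers above, assuming the file is built for CUDA and linked with this snippet; the image dimensions, structuring-element sizes, and the all-white test image are made up for illustration.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Entry points exported by the morphology translation unit above.
extern "C" double erode(unsigned char* img_d, unsigned char* tmp_d,
                        const int width, const int height,
                        const int hsize, const int vsize);
extern "C" double dilate(unsigned char* img_d, unsigned char* tmp_d,
                         const int width, const int height,
                         const int hsize, const int vsize);

int main() {
    const int width = 1024, height = 768;    // assumed image size
    const int hsize = 15, vsize = 15;        // assumed (odd) structuring-element extents
    std::vector<unsigned char> h_img(width * height, 255);

    unsigned char *img_d = nullptr, *tmp_d = nullptr;
    cudaMalloc(&img_d, width * height);
    cudaMalloc(&tmp_d, width * height);      // scratch buffer used between the two passes
    cudaMemcpy(img_d, h_img.data(), width * height, cudaMemcpyHostToDevice);

    double ns = erode(img_d, tmp_d, width, height, hsize, vsize);   // result stays in img_d
    printf("erode took %.0f ns\n", ns);

    cudaMemcpy(h_img.data(), img_d, width * height, cudaMemcpyDeviceToHost);
    cudaFree(img_d);
    cudaFree(tmp_d);
    return 0;
}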
77d9d8aad0866c32fe2d26c3e74c147a72728a39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/layout_utils.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/flags.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/batch_norm_kernel.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/norm_utils.h" #include "paddle/phi/kernels/gpu/batch_norm_utils.h" #ifdef __HIPCC__ #define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim) #else #define LAUNCH_BOUNDS(BlockDim) #endif DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace phi { template <typename T> using CudnnDataType = paddle::platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, phi::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const gpuStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, phi::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream, layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *saved_mean, const BatchNormParamType<T> *saved_inv_variance, const int C, const int N, const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; __shared__ typename BlockReduce::TempStorage mean_storage; __shared__ typename BlockReduce::TempStorage variance_storeage; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; __shared__ BatchNormParamType<T> dscale_val; __shared__ BatchNormParamType<T> dbias_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); if (saved_mean && saved_inv_variance) { if (threadIdx.x == 0) { inv_var_val = 
saved_inv_variance[i]; mean_val = saved_mean[i]; } } else { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum()); x_square_sum = BlockReduce(variance_storeage).Reduce(x_square_sum, hipcub::Sum()); if (threadIdx.x == 0) { mean_val = x_sum / inner_size; inv_var_val = 1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon); } } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale_val = ds_sum * inv_var_val; dbias_val = db_sum; dscale[i] = dscale_val; dbias[i] = dbias_val; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData( const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, hipcub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T, typename Context> void BatchNormGradRawKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon_f, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, bool is_inplace, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { double epsilon = static_cast<double>(epsilon_f); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const auto *d_y = &y_grad; auto *d_x = x_grad; auto *d_scale = scale_grad; auto *d_bias = bias_grad; use_global_stats = is_test || use_global_stats; const auto &x_dims = x.dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, phi::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output if (d_x) { ctx.template Alloc<T>(d_x); } if (d_scale && d_bias) { ctx.template Alloc<BatchNormParamType<T>>(d_scale); ctx.template Alloc<BatchNormParamType<T>>(d_bias); } PADDLE_ENFORCE_EQ( scale.dims().size(), 1UL, phi::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale.dims().size(), scale.dims())); PADDLE_ENFORCE_EQ( scale.dims()[0], C, phi::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. But " "received: the first dimension of scale is [%d]", C, scale.dims()[0])); auto dtype = paddle::platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; // TODO(wangran16): wait for MIOpen to improve the performance of BN // HIP do not support compute format of NHWC // auto compute_format = DataLayout::kNCHW; #else const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && (reserve_space.get_ptr() != nullptr); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; #endif DenseTensor transformed_x(x.type()); DenseTensor transformed_d_y(d_y->type()); DenseTensor transformed_d_x; if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x); TransToChannelFirst<Context, T>(ctx, &x, &transformed_x); ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); if (d_x) { ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x); } } else { transformed_x.ShareDataWith(x); transformed_d_y.ShareDataWith(*d_y); if (d_x) { transformed_d_x.ShareDataWith(*d_x); } } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } const int num = transformed_x.numel(); #ifdef HIPCC const int block = 256; #else const int block = 512; #endif int max_threads = ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = ::min(C, max_blocks); auto stream = ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { if (d_x) { paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); } phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor; functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // miopenTensorDescriptor_t data_desc_; // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor( &bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // mode_ = miopenBNSpatial; #elif CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #endif // CUDNN_VERSION_MIN(7, 0, 1) #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType<T>::type, // x_dims.size() > 3 ? 
x_dims.size() : 4, const_cast<int *>(dims.data()), // const_cast<int *>(strides.data()))); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_, // data_desc_, mode_)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); #endif const auto *saved_mean_data = saved_mean.template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_variance.template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_x && d_scale && d_bias) { #ifdef PADDLE_WITH_HIP if (compute_format == DataLayout::kNCHW) { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationBackward( // dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), data_desc_, // transformed_x.template data<T>(), data_desc_, // transformed_d_y.template data<T>(), data_desc_, // transformed_d_x.template mutable_data<T>(ctx.GetPlace()), // bn_param_desc_, scale->template data<BatchNormParamType<T>>(), // d_scale->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // d_bias->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // epsilon, saved_mean_data, saved_var_data)); #else // CUDNN PER_ACTIVATION mode only support small batch size const size_t CUDNN_PER_ACTIVATION_THRESHOLD = 131070; const bool use_native_kernel = (x_dims.size() == 2 && N >= CUDNN_PER_ACTIVATION_THRESHOLD); if (use_native_kernel) { if (compute_format == DataLayout::kNCHW) { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template 
data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } } else { #if CUDNN_VERSION_MIN(7, 4, 1) size_t workspace_size = 0; void *workspace_ptr = nullptr; DenseTensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_tensor.Resize({static_cast<int64_t>(workspace_size)}); workspace_ptr = static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/ctx.template Alloc<T>(&transformed_d_x), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale.template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, /*dBnScaleData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_scale), /*dBnBiasData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_bias), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/ const_cast<uint8_t *>(reserve_space->template data<uint8_t>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackward( ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, ctx.template Alloc<T>(&transformed_d_x), bn_param_desc_, scale.template data<BatchNormParamType<T>>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias), epsilon, saved_mean_data, saved_var_data)); #endif // CUDNN_VERSION_MIN(7, 4, 1) } #endif if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, ctx.stream(), d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( 
KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, ctx.stream(), d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor( bn_param_desc_)); #endif } else { const auto *running_mean = mean.get_ptr(); const auto *running_var = variance.get_ptr(); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = x; inplace_functor(data_layout, ctx.template Alloc<T>(&px), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x.data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNCHW>) , dim3(grid1), dim3(block), 0, stream, d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNHWC>) , dim3(grid1), dim3(block), 0, stream, d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } template <typename T, typename Context> void BatchNormGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon, const std::string 
&data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { BatchNormGradRawKernel<T, Context>(dev_ctx, x, scale, bias, mean, variance, saved_mean, saved_variance, reserve_space, y_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics, fuse_with_relu, false, x_grad, scale_grad, bias_grad); } template <typename T, typename Context> void BatchNormDoubleGradKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const DenseTensor &y_grad, const DenseTensor &x_grad_grad, const DenseTensor &scale_grad_grad, const DenseTensor &bias_grad_grad, float momentum, float epsilon, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *y_grad_grad) { PADDLE_ENFORCE_EQ(is_test, false, phi::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const DenseTensor *running_mean = nullptr; const DenseTensor *running_variance = nullptr; if (use_global_stats) { running_mean = mean.get_ptr(); running_variance = variance.get_ptr(); } paddle::operators::NormDoubleGradFunctor<Context, T>(ctx, data_layout, &x, &scale, &y_grad, &saved_mean, &saved_variance, running_mean, running_variance, epsilon, use_global_stats, &x_grad_grad, &scale_grad_grad, &bias_grad_grad, x_grad, scale_grad, y_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } #endif #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #else PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #endif
77d9d8aad0866c32fe2d26c3e74c147a72728a39.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/layout_utils.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/flags.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/batch_norm_kernel.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/norm_utils.h" #include "paddle/phi/kernels/gpu/batch_norm_utils.h" #ifdef __HIPCC__ #define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim) #else #define LAUNCH_BOUNDS(BlockDim) #endif DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace phi { template <typename T> using CudnnDataType = paddle::platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, phi::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? 
i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const gpuStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, phi::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); KeBNRestoreData<<<grid2, block, 0, stream>>>( layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *saved_mean, const BatchNormParamType<T> *saved_inv_variance, const int C, const int N, const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; __shared__ typename BlockReduce::TempStorage mean_storage; __shared__ typename BlockReduce::TempStorage variance_storeage; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; __shared__ BatchNormParamType<T> dscale_val; __shared__ BatchNormParamType<T> dbias_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); if (saved_mean && saved_inv_variance) { if (threadIdx.x == 0) { inv_var_val = saved_inv_variance[i]; mean_val = saved_mean[i]; } } else { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum()); x_square_sum = BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum()); if (threadIdx.x == 0) { mean_val = x_sum / inner_size; inv_var_val = 1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon); } } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale_val = ds_sum * inv_var_val; dbias_val = db_sum; dscale[i] = dscale_val; dbias[i] = dbias_val; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData( const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, cub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T, typename Context> void BatchNormGradRawKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon_f, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, bool is_inplace, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { double epsilon = static_cast<double>(epsilon_f); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const auto *d_y = &y_grad; auto *d_x = x_grad; auto *d_scale = scale_grad; auto *d_bias = bias_grad; use_global_stats = is_test || use_global_stats; const auto &x_dims = x.dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, phi::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output if (d_x) { ctx.template Alloc<T>(d_x); } if (d_scale && d_bias) { ctx.template Alloc<BatchNormParamType<T>>(d_scale); ctx.template Alloc<BatchNormParamType<T>>(d_bias); } PADDLE_ENFORCE_EQ( scale.dims().size(), 1UL, phi::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale.dims().size(), scale.dims())); PADDLE_ENFORCE_EQ( scale.dims()[0], C, phi::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. But " "received: the first dimension of scale is [%d]", C, scale.dims()[0])); auto dtype = paddle::platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; // TODO(wangran16): wait for MIOpen to improve the performance of BN // HIP do not support compute format of NHWC // auto compute_format = DataLayout::kNCHW; #else const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && (reserve_space.get_ptr() != nullptr); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; #endif DenseTensor transformed_x(x.type()); DenseTensor transformed_d_y(d_y->type()); DenseTensor transformed_d_x; if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x); TransToChannelFirst<Context, T>(ctx, &x, &transformed_x); ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); if (d_x) { ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x); } } else { transformed_x.ShareDataWith(x); transformed_d_y.ShareDataWith(*d_y); if (d_x) { transformed_d_x.ShareDataWith(*d_x); } } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } const int num = transformed_x.numel(); #ifdef HIPCC const int block = 256; #else const int block = 512; #endif int max_threads = ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = std::min(C, max_blocks); auto stream = ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { if (d_x) { paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); } phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor; functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // miopenTensorDescriptor_t data_desc_; // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor( &bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // mode_ = miopenBNSpatial; #elif CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #endif // CUDNN_VERSION_MIN(7, 0, 1) #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType<T>::type, // x_dims.size() > 3 ? 
x_dims.size() : 4, const_cast<int *>(dims.data()), // const_cast<int *>(strides.data()))); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_, // data_desc_, mode_)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); #endif const auto *saved_mean_data = saved_mean.template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_variance.template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_x && d_scale && d_bias) { #ifdef PADDLE_WITH_HIP if (compute_format == DataLayout::kNCHW) { BNBackward<T, block, DataLayout::kNCHW> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { BNBackward<T, block, DataLayout::kNHWC> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationBackward( // dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), data_desc_, // transformed_x.template data<T>(), data_desc_, // transformed_d_y.template data<T>(), data_desc_, // transformed_d_x.template mutable_data<T>(ctx.GetPlace()), // bn_param_desc_, scale->template data<BatchNormParamType<T>>(), // d_scale->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // d_bias->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // epsilon, saved_mean_data, saved_var_data)); #else // CUDNN PER_ACTIVATION mode only support small batch size const size_t CUDNN_PER_ACTIVATION_THRESHOLD = 131070; const bool use_native_kernel = (x_dims.size() == 2 && N >= CUDNN_PER_ACTIVATION_THRESHOLD); if (use_native_kernel) { if (compute_format == DataLayout::kNCHW) { BNBackward<T, block, DataLayout::kNCHW> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { BNBackward<T, block, DataLayout::kNHWC> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, 
saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } } else { #if CUDNN_VERSION_MIN(7, 4, 1) size_t workspace_size = 0; void *workspace_ptr = nullptr; DenseTensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_tensor.Resize({static_cast<int64_t>(workspace_size)}); workspace_ptr = static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/ctx.template Alloc<T>(&transformed_d_x), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale.template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, /*dBnScaleData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_scale), /*dBnBiasData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_bias), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/ const_cast<uint8_t *>(reserve_space->template data<uint8_t>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackward( ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, ctx.template Alloc<T>(&transformed_d_x), bn_param_desc_, scale.template data<BatchNormParamType<T>>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias), epsilon, saved_mean_data, saved_var_data)); #endif // CUDNN_VERSION_MIN(7, 4, 1) } #endif if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { BNBackwardData<T, block, phi::DataLayout::kNCHW> <<<grid2, block, 0, ctx.stream()>>>( d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, 
d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { BNBackwardData<T, block, phi::DataLayout::kNHWC> <<<grid2, block, 0, ctx.stream()>>>( d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor( bn_param_desc_)); #endif } else { const auto *running_mean = mean.get_ptr(); const auto *running_var = variance.get_ptr(); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = x; inplace_functor(data_layout, ctx.template Alloc<T>(&px), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x.data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { KeBNBackwardData<T, phi::DataLayout::kNCHW> <<<grid1, block, 0, stream>>>(d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { KeBNBackwardData<T, phi::DataLayout::kNHWC> <<<grid1, block, 0, stream>>>(d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } template <typename T, typename Context> void BatchNormGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon, const std::string &data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { BatchNormGradRawKernel<T, Context>(dev_ctx, x, scale, bias, mean, variance, saved_mean, saved_variance, reserve_space, y_grad, momentum, epsilon, data_layout, is_test, use_global_stats, 
trainable_statistics, fuse_with_relu, false, x_grad, scale_grad, bias_grad); } template <typename T, typename Context> void BatchNormDoubleGradKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const DenseTensor &y_grad, const DenseTensor &x_grad_grad, const DenseTensor &scale_grad_grad, const DenseTensor &bias_grad_grad, float momentum, float epsilon, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *y_grad_grad) { PADDLE_ENFORCE_EQ(is_test, false, phi::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const DenseTensor *running_mean = nullptr; const DenseTensor *running_variance = nullptr; if (use_global_stats) { running_mean = mean.get_ptr(); running_variance = variance.get_ptr(); } paddle::operators::NormDoubleGradFunctor<Context, T>(ctx, data_layout, &x, &scale, &y_grad, &saved_mean, &saved_variance, running_mean, running_variance, epsilon, use_global_stats, &x_grad_grad, &scale_grad_grad, &bias_grad_grad, x_grad, scale_grad, y_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } #endif #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #else PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #endif
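A note on the two index expressions that recur in the BNBackward, BNBackwardData and KeBNBackwardScaleBias kernels above: (j / HxW * C + i) * HxW + j % HxW for NCHW, and j * outer_size + i (with outer_size == C) for NHWC. The snippet below is not part of the Paddle sources; it is a small host-only sanity check, using a made-up toy shape, that these expressions agree with the usual layout definitions.

// Host-only sanity check for the layout index math used in the kernels above.
// The shape constants are illustrative assumptions; no GPU or Paddle headers needed.
#include <cassert>

int main() {
  const int N = 2, C = 3, HxW = 4;        // assumed toy shape
  const int inner_size = N * HxW;         // matches inner_size in the kernels
  for (int i = 0; i < C; ++i) {           // i = channel index, as in the kernels
    for (int j = 0; j < inner_size; ++j) {
      const int n = j / HxW, hw = j % HxW;
      // NCHW: offset of element (n, c, hw) is ((n * C + c) * HxW + hw)
      assert((j / HxW * C + i) * HxW + j % HxW == (n * C + i) * HxW + hw);
      // NHWC: channel varies fastest, offset is ((n * HxW + hw) * C + c)
      assert(j * C + i == (n * HxW + hw) * C + i);
    }
  }
  return 0;
}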
60bb44d266f2d5ca501fb30aa4d9d58b3cac7ea9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 30-May-2011 22:03:11 // // user function __device__ #include "res_calc.h" // CUDA kernel function __global__ void op_cuda_res_calc( double *ind_arg0, int *ind_arg0_maps, double *ind_arg1, int *ind_arg1_maps, double *ind_arg2, int *ind_arg2_maps, double *ind_arg3, int *ind_arg3_maps, short *arg0_maps, short *arg1_maps, short *arg2_maps, short *arg3_maps, short *arg4_maps, short *arg5_maps, short *arg6_maps, short *arg7_maps, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors) { double arg6_l[4]; double arg7_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ double *ind_arg0_s; __shared__ double *ind_arg1_s; __shared__ double *ind_arg2_s; __shared__ double *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4]; ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4]; ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4]; ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2); ind_arg1_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4); ind_arg2_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1); ind_arg3_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_double; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg6_l[d] = ZERO_double; for (int d=0; d<4; d++) arg7_l[d] = ZERO_double; // user-supplied kernel call res_calc( ind_arg0_s+arg0_maps[n+offset_b]*2, ind_arg0_s+arg1_maps[n+offset_b]*2, ind_arg1_s+arg2_maps[n+offset_b]*4, ind_arg1_s+arg3_maps[n+offset_b]*4, ind_arg2_s+arg4_maps[n+offset_b]*1, ind_arg2_s+arg5_maps[n+offset_b]*1, arg6_l, arg7_l ); col2 = colors[n+offset_b]; } // store local variables int arg6_map = arg6_maps[n+offset_b]; int arg7_map = arg7_maps[n+offset_b]; for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg6_map*4] += arg6_l[d]; for (int d=0; d<4; d++) ind_arg3_s[d+arg7_map*4] += arg7_l[d]; } __syncthreads(); } } // apply pointered 
write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7 ){ int nargs = 8; op_arg args[8] = {arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7}; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc \n"); } // get plan #ifdef OP_PART_SIZE_1 int part_size = OP_PART_SIZE_1; #else int part_size = OP_part_size; #endif op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif int nblocks = Plan->ncolblk[col]; int nshared = Plan->nshared; hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0, (double *)arg0.data_d, Plan->ind_maps[0], (double *)arg2.data_d, Plan->ind_maps[1], (double *)arg4.data_d, Plan->ind_maps[2], (double *)arg6.data_d, Plan->ind_maps[3], Plan->loc_maps[0], Plan->loc_maps[1], Plan->loc_maps[2], Plan->loc_maps[3], Plan->loc_maps[4], Plan->loc_maps[5], Plan->loc_maps[6], Plan->loc_maps[7], Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_res_calc execution failed\n"); block_offset += nblocks; } // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(2); OP_kernels[2].name = name; OP_kernels[2].count += 1; OP_kernels[2].time += wall_t2 - wall_t1; OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; }
60bb44d266f2d5ca501fb30aa4d9d58b3cac7ea9.cu
// // auto-generated by op2.m on 30-May-2011 22:03:11 // // user function __device__ #include "res_calc.h" // CUDA kernel function __global__ void op_cuda_res_calc( double *ind_arg0, int *ind_arg0_maps, double *ind_arg1, int *ind_arg1_maps, double *ind_arg2, int *ind_arg2_maps, double *ind_arg3, int *ind_arg3_maps, short *arg0_maps, short *arg1_maps, short *arg2_maps, short *arg3_maps, short *arg4_maps, short *arg5_maps, short *arg6_maps, short *arg7_maps, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors) { double arg6_l[4]; double arg7_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ double *ind_arg0_s; __shared__ double *ind_arg1_s; __shared__ double *ind_arg2_s; __shared__ double *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4]; ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4]; ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4]; ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(double)*2); ind_arg1_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(double)*4); ind_arg2_s = (double *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(double)*1); ind_arg3_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_double; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg6_l[d] = ZERO_double; for (int d=0; d<4; d++) arg7_l[d] = ZERO_double; // user-supplied kernel call res_calc( ind_arg0_s+arg0_maps[n+offset_b]*2, ind_arg0_s+arg1_maps[n+offset_b]*2, ind_arg1_s+arg2_maps[n+offset_b]*4, ind_arg1_s+arg3_maps[n+offset_b]*4, ind_arg2_s+arg4_maps[n+offset_b]*1, ind_arg2_s+arg5_maps[n+offset_b]*1, arg6_l, arg7_l ); col2 = colors[n+offset_b]; } // store local variables int arg6_map = arg6_maps[n+offset_b]; int arg7_map = arg7_maps[n+offset_b]; for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg6_map*4] += arg6_l[d]; for (int d=0; d<4; d++) ind_arg3_s[d+arg7_map*4] += arg7_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) 
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7 ){ int nargs = 8; op_arg args[8] = {arg0,arg1,arg2,arg3,arg4,arg5,arg6,arg7}; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc \n"); } // get plan #ifdef OP_PART_SIZE_1 int part_size = OP_PART_SIZE_1; #else int part_size = OP_part_size; #endif op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif int nblocks = Plan->ncolblk[col]; int nshared = Plan->nshared; op_cuda_res_calc<<<nblocks,nthread,nshared>>>( (double *)arg0.data_d, Plan->ind_maps[0], (double *)arg2.data_d, Plan->ind_maps[1], (double *)arg4.data_d, Plan->ind_maps[2], (double *)arg6.data_d, Plan->ind_maps[3], Plan->loc_maps[0], Plan->loc_maps[1], Plan->loc_maps[2], Plan->loc_maps[3], Plan->loc_maps[4], Plan->loc_maps[5], Plan->loc_maps[6], Plan->loc_maps[7], Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol); cutilSafeCall(cudaThreadSynchronize()); cutilCheckMsg("op_cuda_res_calc execution failed\n"); block_offset += nblocks; } // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(2); OP_kernels[2].name = name; OP_kernels[2].count += 1; OP_kernels[2].time += wall_t2 - wall_t1; OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; }
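The res_calc kernel above computes its shared-memory offsets with a ROUND_UP macro supplied by the OP2 headers, which are not reproduced in this listing. Purely as an assumption about what such a macro does (not the verbatim OP2 definition), it typically pads a byte count up to the next multiple of the shared-memory alignment so that each ind_arg*_s region starts on an aligned address, for example:

// Assumed illustration only; OP2's actual ROUND_UP may use a different alignment.
#define ROUND_UP(bytes) ((((bytes) + 15) / 16) * 16)   // round up to a 16-byte boundary
// e.g. nbytes += ROUND_UP(ind_arg0_size * sizeof(double) * 2) keeps ind_arg1_s aligned.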
184eb4c4373def1f1b633c2e14b1d5b73ef98763.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "common.h"
#include <hip/hip_runtime.h>

using namespace std;

// input - input image one dimensional array
// output - output image one dimensional array
// width, height - width and height of the images
// colorWidthStep - number of color bytes (cols * colors)
// grayWidthStep - number of gray bytes
__global__ void bgr_to_gray_kernel(unsigned char* input, unsigned char* output, int width, int height, int colorWidthStep, int grayWidthStep)
{
    // 2D Index of current thread
    const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;

    // Only valid threads perform memory I/O
    if ((xIndex < width) && (yIndex < height))
    {
        //Location of colored pixel in input
        const int color_tid = yIndex * colorWidthStep + (3 * xIndex);

        //Location of gray pixel in output
        const int gray_tid = yIndex * grayWidthStep + xIndex;

        const unsigned char blue = input[color_tid];
        const unsigned char green = input[color_tid + 1];
        const unsigned char red = input[color_tid + 2];

        // The standard NTSC conversion formula that is used for calculating the effective luminance of a pixel (https://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems)
        const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;
        // Alternatively, use an average
        //const float gray = (red + green + blue) / 3.f;

        output[gray_tid] = static_cast<unsigned char>(gray);
    }
}

void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
    cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;

    // Calculate total number of bytes of input and output image
    // Step = cols * number of colors
    size_t colorBytes = input.step * input.rows;
    size_t grayBytes = output.step * output.rows;

    unsigned char *d_input, *d_output;

    // Allocate device memory
    SAFE_CALL(hipMalloc<unsigned char>(&d_input, colorBytes), "CUDA Malloc Failed");
    SAFE_CALL(hipMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed");

    // Copy data from OpenCV input image to device memory
    SAFE_CALL(hipMemcpy(d_input, input.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");

    // Specify a reasonable block size
    const dim3 block(16, 16);

    // Calculate grid size to cover the whole image
    // const dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);
    const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y));
    printf("bgr_to_gray_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);

    // Launch the color conversion kernel
    hipLaunchKernelGGL(( bgr_to_gray_kernel) , dim3(grid), dim3(block) , 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), static_cast<int>(output.step));

    // Synchronize to check for any kernel launch errors
    SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed");

    // Copy back data from destination device memory to OpenCV output image
    SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");

    // Free the device memory
    SAFE_CALL(hipFree(d_input), "CUDA Free Failed");
    SAFE_CALL(hipFree(d_output), "CUDA Free Failed");
}

int main(int argc, char *argv[])
{
    string imagePath;

    if(argc < 2)
        imagePath = "image.jpg";
    else
        imagePath = argv[1];

    // Read input image from the disk
    cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);

    if (input.empty())
    {
        cout << "Image Not Found!" << std::endl;
        cin.get();
        return -1;
    }

    //Create output image
    cv::Mat output(input.rows, input.cols, CV_8UC1);

    //Call the wrapper function
    convert_to_gray(input, output);

    cv::imwrite("output.jpg", output);

    //Allow the windows to resize
    //namedWindow("Input", cv::WINDOW_NORMAL);
    //namedWindow("Output", cv::WINDOW_NORMAL);

    //Show the input and output
    //imshow("Input", input);
    //imshow("Output", output);

    //Wait for key press
    //cv::waitKey();

    return 0;
}
184eb4c4373def1f1b633c2e14b1d5b73ef98763.cu
#include <iostream>
#include <cstdio>
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "common.h"
#include <cuda_runtime.h>

using namespace std;

// input - input image one dimensional array
// output - output image one dimensional array
// width, height - width and height of the images
// colorWidthStep - number of color bytes (cols * colors)
// grayWidthStep - number of gray bytes
__global__ void bgr_to_gray_kernel(unsigned char* input, unsigned char* output, int width, int height, int colorWidthStep, int grayWidthStep)
{
	// 2D Index of current thread
	const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
	const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;

	// Only valid threads perform memory I/O
	if ((xIndex < width) && (yIndex < height))
	{
		// Location of colored pixel in input
		const int color_tid = yIndex * colorWidthStep + (3 * xIndex);

		// Location of gray pixel in output
		const int gray_tid = yIndex * grayWidthStep + xIndex;

		const unsigned char blue  = input[color_tid];
		const unsigned char green = input[color_tid + 1];
		const unsigned char red   = input[color_tid + 2];

		// The standard NTSC conversion formula that is used for calculating the effective luminance of a pixel
		// (https://en.wikipedia.org/wiki/Grayscale#Luma_coding_in_video_systems)
		const float gray = red * 0.3f + green * 0.59f + blue * 0.11f;

		// Alternatively, use an average
		//const float gray = (red + green + blue) / 3.f;

		output[gray_tid] = static_cast<unsigned char>(gray);
	}
}

void convert_to_gray(const cv::Mat& input, cv::Mat& output)
{
	cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;

	// Calculate total number of bytes of input and output image
	// Step = cols * number of colors
	size_t colorBytes = input.step * input.rows;
	size_t grayBytes = output.step * output.rows;

	unsigned char *d_input, *d_output;

	// Allocate device memory
	SAFE_CALL(cudaMalloc<unsigned char>(&d_input, colorBytes), "CUDA Malloc Failed");
	SAFE_CALL(cudaMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed");

	// Copy data from OpenCV input image to device memory
	SAFE_CALL(cudaMemcpy(d_input, input.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");

	// Specify a reasonable block size
	const dim3 block(16, 16);

	// Calculate grid size to cover the whole image
	// const dim3 grid((input.cols + block.x - 1) / block.x, (input.rows + block.y - 1) / block.y);
	const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows / block.y));
	printf("bgr_to_gray_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);

	// Launch the color conversion kernel
	bgr_to_gray_kernel<<<grid, block>>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), static_cast<int>(output.step));

	// Synchronize to check for any kernel launch errors
	SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");

	// Copy back data from destination device memory to OpenCV output image
	SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");

	// Free the device memory
	SAFE_CALL(cudaFree(d_input), "CUDA Free Failed");
	SAFE_CALL(cudaFree(d_output), "CUDA Free Failed");
}

int main(int argc, char *argv[])
{
	string imagePath;

	if(argc < 2)
		imagePath = "image.jpg";
	else
		imagePath = argv[1];

	// Read input image from the disk
	cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);

	if (input.empty())
	{
		cout << "Image Not Found!" << std::endl;
		cin.get();
		return -1;
	}

	// Create output image
	cv::Mat output(input.rows, input.cols, CV_8UC1);

	// Call the wrapper function
	convert_to_gray(input, output);

	cv::imwrite("output.jpg", output);

	// Allow the windows to resize
	//namedWindow("Input", cv::WINDOW_NORMAL);
	//namedWindow("Output", cv::WINDOW_NORMAL);

	// Show the input and output
	//imshow("Input", input);
	//imshow("Output", output);

	// Wait for key press
	//cv::waitKey();

	return 0;
}
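// The wrapper above leans on a SAFE_CALL(call, message) helper that comes from common.h, which is not part of
// this listing. As a rough sketch (an assumption, not the actual contents of common.h), such a helper usually
// wraps a cudaError_t check like the one below, so every cudaMalloc/cudaMemcpy/cudaFree call site stays a one-liner.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for SAFE_CALL: print the caller-supplied message together with the CUDA error string
// and abort on failure.
static inline void safe_call(cudaError_t err, const char* msg, const char* file, int line)
{
	if (err != cudaSuccess)
	{
		fprintf(stderr, "%s at %s:%d: %s\n", msg, file, line, cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}

#define SAFE_CALL(call, msg) safe_call((call), (msg), __FILE__, __LINE__)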
1f23fbe2ecd1f5e1c8234f3973571f830594cfc9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__constant__ float *c_Kernel;

__global__ void convolutionColumnsKernel_down_smp(
	float *d_Dst,
	float *d_Src,
	int imageW,
	int imageH,
	int n_imageH,
	int pitch,
	int filter_Rad,
	int Halo_steps
)
{
	extern __shared__ float s_Data[];

	//Offset to the upper halo edge
	const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
	const int baseY = (blockIdx.y * 2 * COLUMNS_RESULT_STEPS - Halo_steps) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
	const int baseY1 = (blockIdx.y * COLUMNS_RESULT_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;

	if (baseX < imageW)
	{
		d_Src += baseY * pitch + baseX;
		d_Dst += baseY1 * pitch + baseX;

		//Upper halo
		#pragma unroll
		for (int i = 0; i < Halo_steps; i++)
		{
			s_Data[(threadIdx.x * (2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) * COLUMNS_BLOCKDIM_Y) + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
				(baseY + i * COLUMNS_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
		}

		//Lower halo + Main data
		#pragma unroll
		for (int i = Halo_steps; i < Halo_steps + 2 * COLUMNS_RESULT_STEPS + Halo_steps; i++)
		{
			s_Data[(threadIdx.x * (2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) * COLUMNS_BLOCKDIM_Y) + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
				(baseY + i * COLUMNS_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
		}

		//Compute and store results
		__syncthreads();

		#pragma unroll
		for (int i = 0; i < COLUMNS_RESULT_STEPS; ++i)
		{
			float sum = 0;

			if (baseY1 + i * COLUMNS_BLOCKDIM_Y < n_imageH)
			{
				#pragma unroll
				for (int j = -filter_Rad; j <= filter_Rad; ++j)
				{
					sum += c_Kernel[filter_Rad - j] * s_Data[(threadIdx.x * (2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) * COLUMNS_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * COLUMNS_BLOCKDIM_Y + Halo_steps * COLUMNS_BLOCKDIM_Y + j];
				}

				d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
			}
		}
	}
}
1f23fbe2ecd1f5e1c8234f3973571f830594cfc9.cu
#include "includes.h" __constant__ float *c_Kernel; __global__ void convolutionColumnsKernel_down_smp( float *d_Dst, float *d_Src, int imageW, int imageH, int n_imageH, int pitch, int filter_Rad, int Halo_steps ) { extern __shared__ float s_Data[]; //Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * 2 * COLUMNS_RESULT_STEPS - Halo_steps) * COLUMNS_BLOCKDIM_Y + threadIdx.y; const int baseY1 = (blockIdx.y * COLUMNS_RESULT_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; if (baseX < imageW) { d_Src += baseY * pitch + baseX; d_Dst += baseY1 * pitch + baseX; //Upper halo #pragma unroll for (int i = 0; i < Halo_steps; i++) { s_Data[(threadIdx.x*(2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) *COLUMNS_BLOCKDIM_Y) + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Lower halo + Main data #pragma unroll for (int i = Halo_steps; i < Halo_steps + 2 * COLUMNS_RESULT_STEPS + Halo_steps; i++) { s_Data[(threadIdx.x*(2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) *COLUMNS_BLOCKDIM_Y) + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } //Compute and store results __syncthreads(); #pragma unroll for (int i = 0; i < COLUMNS_RESULT_STEPS; ++i) { float sum = 0; if (baseY1 + i * COLUMNS_BLOCKDIM_Y < n_imageH) { #pragma unroll for (int j = -filter_Rad; j <= filter_Rad; ++j) { sum += c_Kernel[filter_Rad - j] * s_Data[(threadIdx.x*(2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) *COLUMNS_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * COLUMNS_BLOCKDIM_Y + Halo_steps * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } } }
f1e1710020a59d98564300f3bad5cd9018ff83d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //********************************************************************************************************************************************************// //*********************************************** Proton CT Preprocessing and Image Reconstruction Code *************************************************// //********************************************************************************************************************************************************// #include "pCT_Reconstruction.h" //********************************************************************************************************************************************************// //********************************************************************** Host Code ***********************************************************************// //********************************************************************************************************************************************************// // Preprocessing setup and initializations void assign_SSD_positions(); void initializations(); void count_histories_old(); void count_histories_v0(); void count_histories_v1(); void reserve_vector_capacity(); // Preprocessing routines void iterative_data_read_old( int, int, int ); void iterative_data_read_v0( int, int, int ); void iterative_data_read_v1( int, int, int ); void recon_volume_intersections( int ); void bin_valid_histories( int ); void calculate_means(); void sum_differences( int, int ); void calculate_std_devs(); void statistical_cuts( int, int ); void initialize_sinogram(); void construct_sinogram(); void filter(); void backprojection(); // Hull-Detection void initialize_SC_hull( bool*&, bool*& ); void initialize_MSC_hull( int*&, int*& ); void initialize_SM_hull( int*&, int*& ); void initialize_float_image( float*&, float*& ); void SC( int ); void MSC( int ); void SM( int ); void MSC_differences(); void SM_differences(); void MSC_threshold(); void SM_threshold(); void averaging_filter( bool*&, bool*&, int); // MLP void create_MLP_test_image(); // In development void MLP_test(); // In development // Write arrays/vectors to file void write_bool_array_to_files( char*, const char*, const char*, bool*, int, int, int ); void write_bool_array_to_file( char*, const char*, const char* , bool*, int, int, int ); void write_integer_array_to_files( char*, const char*, const char* , int*, int, int, int ); void write_integer_array_to_file( char*, const char*, const char* , int*, int, int, int ); void write_float_array_to_files( char*, const char*, const char* , float*&, int, int, int ); void write_float_array_to_file( char*, const char*, const char* , float*, int, int, int ); void write_float_vector_to_file( char*, const char*, const char* , vector<float>, int, int, int ); // Memory transfers and allocations/deallocations void post_cut_memory_clean(); void resize_vectors( int ); void shrink_vectors( int ); void initial_processing_memory_clean(); // Helper Functions bool bad_data_angle( int ); int calculate_x_voxel(float, int, float); int calculate_y_voxel(float, int, float); int calculate_slice(float, int, float); // New routine test functions void test_func(); //********************************************************************************************************************************************************// //****************************************************************** Device (GPU) Code 
*******************************************************************// //********************************************************************************************************************************************************// // Preprocessing routines __global__ void recon_volume_intersections_kernel( int, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*); __global__ void bin_valid_histories_kernel( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* ); __global__ void calculate_means_kernel( int*, float*, float*, float* ); __global__ void sum_differences_kernel( int, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* ); __global__ void calculate_std_devs_kernel( int*, float*, float*, float* ); __global__ void statistical_cuts_kernel( int, int*, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, bool*, float*, float* ); __global__ void construct_sinogram_kernel( int*, float* ); __global__ void filter_kernel( float*, float* ); // Hull-Detection __device__ void voxel_walk( bool*&, float, float, float, float, float, float ); __global__ void SC_kernel( int, bool*, int*, bool*, float*, float*, float*, float*, float*, float*, float* ); __global__ void MSC_kernel( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* ); __global__ void SM_kernel( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* ); __global__ void MSC_differences_kernel( int*, int* ); __global__ void SM_differences_kernel( int*, int* ); __global__ void SM_threshold_search_kernel( int*, int* ); __global__ void MSC_threshold_kernel( int* ); __global__ void SM_threshold_kernel( int*, int* ); __global__ void carve_differences( int*, int* ); __global__ void averaging_filter_kernel( bool*, int, float ); // New routine test functions __global__ void test_func_kernel( int*, int); /************************************************************************************************************************************************************/ /******************************************************************** Program Main **************************************************************************/ /************************************************************************************************************************************************************/ int main(int argc, char** argv) { char user_response[20]; /* puts("Hit enter to stop..."); fgets(user_response, sizeof(user_response), stdin); exit(1); */ /********************************************************************************************/ /* Start the Execution Timing Clock */ /********************************************************************************************/ clock_t start,end; start = clock(); /********************************************************************************************/ /* Initialize Hull Detection Images and Transfer Them to the GPU */ /********************************************************************************************/ if( SC_ON ) initialize_SC_hull( SC_image_h, SC_image_d ); if( MSC_ON ) initialize_MSC_hull( MSC_image_h, MSC_image_d ); if( SM_ON ) initialize_SM_hull( SM_image_h, SM_image_d ); 
/********************************************************************************************/ /* Read the u-Coordinates of the Detector Planes from the Config File, Allocate and */ /* Initialize Statistical Data Arrays, and Count the Number of Histories Per File, */ /* Projection, Gantry Angle, Scan, and Total. Request Input from User to Continue. */ /********************************************************************************************/ puts("Reading tracker plane positions and initializing storage arrays..."); if( CONFIG_FILE) assign_SSD_positions(); // Read the detector plane u-coordinates from config file initializations(); // allocate and initialize host and GPU memory for binning if( VERSION_OLD ) count_histories_old(); // count the number of histories per file, per scan, total, etc. else if( VERSION_0 ) count_histories_v0(); // count the number of histories per file, per scan, total, etc. else count_histories_v1(); /********************************************************************************************/ /* Iteratively Read and Process Data One Chunk at a Time. There are at Most */ /* MAX_GPU_HISTORIES Per Chunk (i.e. Iteration). On Each Iteration: */ /* (1) Read Data from File */ /* (2) Determine Which Histories Traverse the Reconstruction Volume and Store this */ /* Information in a Boolean Array */ /* (3) Determine Which Bin Each History Belongs to */ /* (4) Use the Boolean Array to Determine Which Histories to Keep and then Push */ /* the Intermediate Data from these Histories onto the Permanent Storage Vectors */ /* (5) Free Up Temporary Host/GPU Array Memory Allocated During Iteration */ /********************************************************************************************/ puts("Iteratively Reading Data from Hard Disk"); puts("Removing Proton Histories that Don't Pass Through the Reconstruction Volume"); puts("Binning the Data from Those that Did..."); int start_file_num = 0, end_file_num = 0, histories_to_process = 0; while( start_file_num != NUM_FILES ) { while( end_file_num < NUM_FILES ) { if( histories_to_process + histories_per_file[end_file_num] < MAX_GPU_HISTORIES ) histories_to_process += histories_per_file[end_file_num]; else break; end_file_num++; } if( VERSION_OLD ) iterative_data_read_old( histories_to_process, start_file_num, end_file_num - 1 ); else if( VERSION_0 ) iterative_data_read_v0( histories_to_process, start_file_num, end_file_num - 1 ); else iterative_data_read_v1( histories_to_process, start_file_num, end_file_num - 1 ); recon_volume_intersections( histories_to_process ); bin_valid_histories( histories_to_process ); if( SC_ON && (!bad_data_angle( gantry_angle_h[0] ) || !RESTRICTED_ANGLES ) ) SC( histories_to_process ); if( MSC_ON ) MSC( histories_to_process ); if( SM_ON ) SM( histories_to_process ); initial_processing_memory_clean(); start_file_num = end_file_num; histories_to_process = 0; } exit(1); /********************************************************************************************/ /* Shrink vectors so capacity reduced to size, which is number of histories remaining after */ /* histories that didn't intersect reconstruction volume were ignored */ /********************************************************************************************/ shrink_vectors( recon_vol_histories ); /********************************************************************************************/ /* Perform Thresholding on MSC and SM Hulls and Write All Hull Images to File */ 
/********************************************************************************************/ puts("\nPerforming Hull Thresholding and Writing Hull Images to Disk..."); if( SC_ON ) { hipMemcpy(SC_image_h, SC_image_d, MEM_SIZE_IMAGE_BOOL, hipMemcpyDeviceToHost); write_bool_array_to_file("x_sc", output_directory, output_folder, SC_image_h, COLUMNS, ROWS, SLICES ); } if( MSC_ON ) MSC_threshold(); if( SM_ON ) SM_threshold(); exit(1); /********************************************************************************************/ /* Calculate the Mean WEPL, Relative ut-Angle, and Relative uv-Angle for Each Bin and Count */ /* the Number of Histories in Each Bin */ ///********************************************************************************************/ puts("Calculating the Mean for Each Bin Before Cuts..."); calculate_means(); /********************************************************************************************/ /* Calculate the Standard Deviation in WEPL, Relative ut-Angle, and Relative uv-Angle for */ /* Each Bin. Iterate Through the Valid History Vectors One Chunk at a Time, With at Most */ /* MAX_GPU_HISTORIES Per Chunk, and Calculate the Difference Between the Mean WEPL and WEPL,*/ /* Mean Relative ut-Angle and Relative ut-Angle, and Mean Relative uv-Angle and Relative */ /* uv-Angle for Each History. The Standard Deviation is then Found By Calculating the Sum */ /* of these Differences for Each Bin and Dividing it by the Number of Histories in the Bin */ /********************************************************************************************/ puts("Summing up the Difference Between Individual Measurements and the Mean for Each Bin..."); int remaining_histories = recon_vol_histories; int start_position = 0; while( remaining_histories > 0 ) { if( remaining_histories > MAX_GPU_HISTORIES ) histories_to_process = MAX_GPU_HISTORIES; else histories_to_process = remaining_histories; sum_differences( start_position, histories_to_process ); remaining_histories -= MAX_GPU_HISTORIES; start_position += MAX_GPU_HISTORIES; } puts("Calculating Standard Deviations for Each Bin..."); calculate_std_devs(); /********************************************************************************************/ /* Allocate Memory for the Sinogram on the Host, Initialize it to Zeros, Allocate Memory */ /* for it on the GPU, then Transfer the Initialized Sinogram to the GPU */ /********************************************************************************************/ initialize_sinogram(); /********************************************************************************************/ /* Iterate Through the Valid History Vectors One Chunk at a Time, With at Most */ /* MAX_GPU_HISTORIES Per Chunk, and Perform Statistical Cuts */ /********************************************************************************************/ puts("Performing Statistical Cuts..."); remaining_histories = recon_vol_histories, start_position = 0; while( remaining_histories > 0 ) { if( remaining_histories > MAX_GPU_HISTORIES ) histories_to_process = MAX_GPU_HISTORIES; else histories_to_process = remaining_histories; statistical_cuts( start_position, histories_to_process ); remaining_histories -= MAX_GPU_HISTORIES; start_position += MAX_GPU_HISTORIES; } printf("%d out of %d histories passed cuts\n", post_cut_histories, total_histories ); /********************************************************************************************/ /* Free the host memory for the bin number array and gpu memory for the statistics arrays */ /* 
and shrink the vectors to fit exactly the number of histories that passed cuts */ /********************************************************************************************/ puts("Freeing unnecessary memory and shrinking vectors to just fit remaining histories..."); post_cut_memory_clean(); resize_vectors( post_cut_histories ); shrink_vectors( post_cut_histories ); /********************************************************************************************/ /* Recalculate the Mean WEPL for Each Bin Using the Histories Remaining After Cuts and Use */ /* these to Produce the Sinogram */ ///********************************************************************************************/ puts("Calculating the Elements of the Sinogram..."); construct_sinogram(); /********************************************************************************************/ /* Perform Filtered Backprojection and Write FBP Hull to Disk */ /********************************************************************************************/ if( FBP_ON ) { filter(); backprojection(); } /********************************************************************************************/ /* End Program Execution Timing Clock and Print the Total Execution Time to Console Window */ /********************************************************************************************/ //end = clock(); //printf("Total execution time : %3f\n",(double)(end-start)/1000); /********************************************************************************************/ /* Program Has Finished Execution. Require the User to Hit the Enter Key to Terminate the */ /* Program and Close the Terminal/Console Window */ /********************************************************************************************/ puts("Preprocessing complete. 
Press any key to close the console window..."); fgets(user_response, sizeof(user_response), stdin); } /************************************************************************************************************************************************************/ /******************************************************** Preprocessing Setup and Initializations ***********************************************************/ /************************************************************************************************************************************************************/ void assign_SSD_positions() //HERE THE COORDINATES OF THE DETECTORS PLANES ARE LOADED, THE CONFIG FILE IS CREATED BY FORD (RWS) { char user_response[20]; char configFilename[512]; sprintf(configFilename, "%s%s\\scan.cfg", input_directory, input_folder); if( DEBUG_TEXT_ON ) printf("Opening config file %s...\n", configFilename); ifstream configFile(configFilename); if( !configFile.is_open() ) { printf("ERROR: config file not found at %s!\n", configFilename); fputs("Didn't Find File", stdout); fflush(stdout); printf("text = \"%s\"\n", user_response); fgets(user_response, sizeof(user_response), stdin); exit(1); } else { fputs("Found File", stdout); fflush(stdout); printf("user_response = \"%s\"\n", user_response); } if( DEBUG_TEXT_ON ) puts("Reading Tracking Plane Positions..."); for( int i = 0; i < 8; i++ ) { configFile >> SSD_u_Positions[i]; if( DEBUG_TEXT_ON ) printf("SSD_u_Positions[%d] = %3f", i, SSD_u_Positions[i]); } configFile.close(); } void initializations() { for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ ) histories_per_scan[scan_number] = 0; histories_per_file = (int*) calloc( NUM_SCANS * GANTRY_ANGLES, sizeof(int) ); histories_per_gantry_angle = (int*) calloc( GANTRY_ANGLES, sizeof(int) ); recon_vol_histories_per_projection = (int*) calloc( GANTRY_ANGLES, sizeof(int) ); bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) ); mean_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) ); mean_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); mean_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); stddev_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); stddev_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); stddev_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) ); hipMalloc((void**) &bin_counts_d, MEM_SIZE_BINS_INTS ); hipMalloc((void**) &mean_WEPL_d, MEM_SIZE_BINS_FLOATS ); hipMalloc((void**) &mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS ); hipMalloc((void**) &mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS ); hipMalloc((void**) &stddev_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS ); hipMalloc((void**) &stddev_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS ); hipMalloc((void**) &stddev_WEPL_d, MEM_SIZE_BINS_FLOATS ); hipMemcpy( bin_counts_d, bin_counts_h, MEM_SIZE_BINS_INTS, hipMemcpyHostToDevice ); hipMemcpy( mean_WEPL_d, mean_WEPL_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); hipMemcpy( mean_rel_ut_angle_d, mean_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); hipMemcpy( mean_rel_uv_angle_d, mean_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); hipMemcpy( stddev_rel_ut_angle_d, stddev_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); hipMemcpy( stddev_rel_uv_angle_d, stddev_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); hipMemcpy( stddev_WEPL_d, stddev_WEPL_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); } void count_histories_old() { if( DEBUG_TEXT_ON ) printf("Counting histories...\n"); char user_response[20]; char 
data_filename[128]; int file_size, num_histories, file_number = 0, gantry_position_number = 0; for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ ) { for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ ) { sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension ); //printf("Name = %s", data_filename ); FILE *data_file = fopen(data_filename, "rb"); if( data_file == NULL ) { fputs( "Error Opening Data File: Check that the directories are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } fseek( data_file, 0, SEEK_END ); file_size = ftell( data_file ); if( BINARY_ENCODING ) { if( file_size % BYTES_PER_HISTORY ) { printf("ERROR! Problem with bytes_per_history!\n"); fgets(user_response, sizeof(user_response), stdin); exit(2); } num_histories = file_size / BYTES_PER_HISTORY; } else num_histories = file_size; fclose(data_file); histories_per_file[file_number] = num_histories; histories_per_gantry_angle[gantry_position_number] += num_histories; histories_per_scan[scan_number-1] += num_histories; total_histories += num_histories; if( DEBUG_TEXT_ON ) printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number); } } if( DEBUG_TEXT_ON ) { for( int file_number = 0, int gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ ) { if( file_number % NUM_SCANS == 0 ) printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) ); printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 ); } for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ ) printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1); printf("There are a Total of %d Histories\n", total_histories); } } void count_histories_v0() { if( DEBUG_TEXT_ON ) puts("Counting histories...\n"); char user_response[20]; char data_filename[256]; int file_size, num_histories, file_number = 0, gantry_position_number = 0; for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ ) { for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ ) { sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); ifstream data_file(data_filename, ios::binary); if( data_file == NULL ) { fputs( "File not found: Check that the directories and files are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } char magic_number[5]; data_file.read(magic_number, 4); magic_number[4] = '\0'; if( strcmp(magic_number, "PCTD") ) { puts("Error: unknown file type (should be PCTD)!\n"); fgets(user_response, sizeof(user_response), stdin); exit(1); } int version_id; data_file.read((char*)&version_id, sizeof(int)); if( version_id == 0 ) { int num_histories; data_file.read((char*)&num_histories, sizeof(int)); data_file.close(); histories_per_file[file_number] = num_histories; histories_per_gantry_angle[gantry_position_number] += num_histories; histories_per_scan[scan_number-1] += num_histories; total_histories += num_histories; if( DEBUG_TEXT_ON ) printf("There are %d 
Histories for Gantry Angle %d From Scan Number %d\n", num_histories, gantry_angle, scan_number);
			}
			else
			{
				printf("ERROR: Unsupported format version (%d)!\n", version_id);
				fgets(user_response, sizeof(user_response), stdin);
				exit(1);
			}
		}
	}
	if( DEBUG_TEXT_ON )
	{
		for( int file_number = 0, gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ )
		{
			if( file_number % NUM_SCANS == 0 )
				printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number * GANTRY_ANGLE_INTERVAL) );
			printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
		}
		for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
			printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
		printf("There are a Total of %d Histories\n", total_histories);
	}
	// The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate
	// through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a
	// valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin
	// number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many
	// valid histories there will be, so memory is allocated to accommodate every history and the actual number of valid histories
	// is counted. Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw
	// data and recalculating it every time it's needed. Once all the data is processed and we know how many valid histories we
	// have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to
	// and from the GPU.
}
void count_histories_v1()
{
	if( DEBUG_TEXT_ON )
		printf("Counting histories...\n");
	char user_response[20];
	char data_filename[128];
	int file_size, num_histories, file_number = 0, gantry_position_number = 0;
	for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ )
	{
		for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ )
		{
			sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension );
			FILE *data_file = fopen(data_filename, "rb");
			if( data_file == NULL )
			{
				fputs( "Error Opening Data File: Check that the directories are properly named.", stderr );
				fgets(user_response, sizeof(user_response), stdin);
				exit(1);
			}
			fseek( data_file, 0, SEEK_END );
			file_size = ftell( data_file );
			if( BINARY_ENCODING )
			{
				if( file_size % BYTES_PER_HISTORY )
				{
					printf("ERROR! Problem with bytes_per_history!\n");
					fgets(user_response, sizeof(user_response), stdin);
					exit(2);
				}
				num_histories = file_size / BYTES_PER_HISTORY;
			}
			else
				num_histories = file_size;
			fclose(data_file);
			histories_per_file[file_number] = num_histories;
			histories_per_gantry_angle[gantry_position_number] += num_histories;
			histories_per_scan[scan_number-1] += num_histories;
			total_histories += num_histories;
			if( DEBUG_TEXT_ON )
				printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n", num_histories, gantry_angle, scan_number);
		}
	}
	if( DEBUG_TEXT_ON )
	{
		for( int file_number = 0, gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ )
		{
			if( file_number % NUM_SCANS == 0 )
				printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number * GANTRY_ANGLE_INTERVAL) );
			printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 );
		}
		for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ )
			printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1);
		printf("There are a Total of %d Histories\n", total_histories);
	}
	// The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate
	// through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a
	// valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin
	// number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many
	// valid histories there will be, so memory is allocated to accommodate every history and the actual number of valid histories
	// is counted. Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw
	// data and recalculating it every time it's needed. Once all the data is processed and we know how many valid histories we
	// have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to
	// and from the GPU.
} void reserve_vector_capacity() { bin_num_vector.reserve( total_histories ); //gantry_angle_vector.reserve( total_histories ); WEPL_vector.reserve( total_histories ); x_entry_vector.reserve( total_histories ); y_entry_vector.reserve( total_histories ); z_entry_vector.reserve( total_histories ); x_exit_vector.reserve( total_histories ); y_exit_vector.reserve( total_histories ); z_exit_vector.reserve( total_histories ); xy_entry_angle_vector.reserve( total_histories ); xz_entry_angle_vector.reserve( total_histories ); //xy_exit_angle_vector.reserve( total_histories ); //xz_exit_angle_vector.reserve( total_histories ); relative_ut_angle_vector.reserve( total_histories ); relative_uv_angle_vector.reserve( total_histories ); } /************************************************************************************************************************************************************/ /********************************************************* Data Importation, Initial Cuts, and Binning ******************************************************/ /************************************************************************************************************************************************************/ void iterative_data_read_old( int num_histories, int start_file_num, int end_file_num ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; t_in_1_h = (float*) malloc(mem_size_hist_floats); t_in_2_h = (float*) malloc(mem_size_hist_floats); t_out_1_h = (float*) malloc(mem_size_hist_floats); t_out_2_h = (float*) malloc(mem_size_hist_floats); u_in_1_h = (float*) malloc(mem_size_hist_floats); u_in_2_h = (float*) malloc(mem_size_hist_floats); u_out_1_h = (float*) malloc(mem_size_hist_floats); u_out_2_h = (float*) malloc(mem_size_hist_floats); v_in_1_h = (float*) malloc(mem_size_hist_floats); v_in_2_h = (float*) malloc(mem_size_hist_floats); v_out_1_h = (float*) malloc(mem_size_hist_floats); v_out_2_h = (float*) malloc(mem_size_hist_floats); WEPL_h = (float*) malloc(mem_size_hist_floats); gantry_angle_h = (int*) malloc(mem_size_hist_ints); int array_index = 0, gantry_position, gantry_angle, scan_number, scan_histories; float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data; char tracker_plane[4]; char data_filename[128]; FILE* data_file; for( int file_num = start_file_num; file_num <= end_file_num; file_num++ ) { gantry_position = file_num / NUM_SCANS; gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL; scan_number = file_num % NUM_SCANS + 1; scan_histories = histories_per_file[file_num]; printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number ); sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension ); data_file = fopen( data_filename, "rb" ); for( int history = 0; history < scan_histories; history++, array_index++ ) { fread(&v_data, sizeof(float), 4, data_file); fread(&t_data, sizeof(float), 4, data_file); fread(&tracker_plane, sizeof(char), 4, data_file); fread(&WEPL_data, sizeof(float), 1, data_file); fread(&gantry_angle_data, sizeof(float), 1, data_file); fread(&dummy_data, sizeof(float), 1, data_file); // dummy read because each event has an extra 4 bytes, for some reason if( DATA_IN_MM ) { // Convert the input data from mm to cm v_in_1_h[array_index] = v_data[0] * 0.1; v_in_2_h[array_index] = v_data[1] * 0.1; v_out_1_h[array_index] = v_data[2] * 0.1; v_out_2_h[array_index] = v_data[3] 
* 0.1; t_in_1_h[array_index] = t_data[0] * 0.1; t_in_2_h[array_index] = t_data[1] * 0.1; t_out_1_h[array_index] = t_data[2] * 0.1; t_out_2_h[array_index] = t_data[3] * 0.1; WEPL_h[array_index] = WEPL_data * 0.1; } else { v_in_1_h[array_index] = v_data[0]; v_in_2_h[array_index] = v_data[1]; v_out_1_h[array_index] = v_data[2]; v_out_2_h[array_index] = v_data[3]; t_in_1_h[array_index] = t_data[0]; t_in_2_h[array_index] = t_data[1]; t_out_1_h[array_index] = t_data[2]; t_out_2_h[array_index] = t_data[3]; WEPL_h[array_index] = WEPL_data; } if( !MICAH_SIM ) { u_in_1_h[array_index] = SSD_u_Positions[int(tracker_plane[0])]; u_in_2_h[array_index] = SSD_u_Positions[int(tracker_plane[1])]; u_out_1_h[array_index] = SSD_u_Positions[int(tracker_plane[2])]; u_out_2_h[array_index] = SSD_u_Positions[int(tracker_plane[3])]; } else { u_in_1_h[array_index] = SSD_u_Positions[0]; u_in_2_h[array_index] = SSD_u_Positions[2]; u_out_1_h[array_index] = SSD_u_Positions[4]; u_out_2_h[array_index] = SSD_u_Positions[6]; } if( SSD_IN_MM ) { // Convert the tracking plane positions from mm to cm u_in_1_h[array_index] *= 0.1; u_in_2_h[array_index] *= 0.1; u_out_1_h[array_index] *= 0.1; u_out_2_h[array_index] *= 0.1; } gantry_angle_h[array_index] = int(gantry_angle_data); } fclose(data_file); } } void iterative_data_read_v0( int num_histories, int start_file_num, int end_file_num ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; t_in_1_h = (float*) malloc(mem_size_hist_floats); t_in_2_h = (float*) malloc(mem_size_hist_floats); t_out_1_h = (float*) malloc(mem_size_hist_floats); t_out_2_h = (float*) malloc(mem_size_hist_floats); u_in_1_h = (float*) malloc(mem_size_hist_floats); u_in_2_h = (float*) malloc(mem_size_hist_floats); u_out_1_h = (float*) malloc(mem_size_hist_floats); u_out_2_h = (float*) malloc(mem_size_hist_floats); v_in_1_h = (float*) malloc(mem_size_hist_floats); v_in_2_h = (float*) malloc(mem_size_hist_floats); v_out_1_h = (float*) malloc(mem_size_hist_floats); v_out_2_h = (float*) malloc(mem_size_hist_floats); WEPL_h = (float*) malloc(mem_size_hist_floats); gantry_angle_h = (int*) malloc(mem_size_hist_ints); /* Contains the following headers: Magic number identifier: "PCTD" (4-byte string) Format version identifier (integer) Number of events in file (integer) Projection angle (float | degrees) Beam energy (float | MeV) Acquisition/generation date (integer | Unix time) Pre-process date (integer | Unix time) Phantom name or description (variable length string) Data source (variable length string) Prepared by (variable length string) * Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string. Event data: Data is be stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory. 
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system: t0 (float * N) t1 (float * N) t2 (float * N) t3 (float * N) v0 (float * N) v1 (float * N) v2 (float * N) v3 (float * N) u0 (float * N) u1 (float * N) u2 (float * N) u3 (float * N) WEPL in mm (float * N) */ char user_response[20]; char data_filename[128]; int array_index = 0; float min_WEPL = 20, max_WEPL = -20; for( int file_num = start_file_num; file_num <= end_file_num; file_num++ ) { int gantry_position = file_num / NUM_SCANS; int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL; int scan_number = file_num % NUM_SCANS + 1; int scan_histories = histories_per_file[file_num]; printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number ); sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); ifstream data_file(data_filename, ios::binary); if( data_file == NULL ) { fputs( "File not found: Check that the directories and files are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } char magic_number[5]; data_file.read(magic_number, 4); magic_number[4] = '\0'; if( strcmp(magic_number, "PCTD") ) { puts("Error: unknown file type (should be PCTD)!\n"); fgets(user_response, sizeof(user_response), stdin); exit(1); } int version_id; data_file.read((char*)&version_id, sizeof(int)); if( version_id == 0 ) { int num_histories; data_file.read((char*)&num_histories, sizeof(int)); puts("Reading headers from file...\n"); float projection_angle, beam_energy; int generation_date, preprocess_date; int phantom_name_size, data_source_size, prepared_by_size; char *phantom_name, *data_source, *prepared_by; data_file.read((char*)&projection_angle, sizeof(float)); data_file.read((char*)&beam_energy, sizeof(float)); data_file.read((char*)&generation_date, sizeof(int)); data_file.read((char*)&preprocess_date, sizeof(int)); data_file.read((char*)&phantom_name_size, sizeof(int)); phantom_name = (char*)malloc(phantom_name_size); data_file.read(phantom_name, phantom_name_size); data_file.read((char*)&data_source_size, sizeof(int)); data_source = (char*)malloc(data_source_size); data_file.read(data_source, data_source_size); data_file.read((char*)&prepared_by_size, sizeof(int)); prepared_by = (char*)malloc(prepared_by_size); data_file.read(prepared_by, prepared_by_size); printf("Loading %d histories from file\n", num_histories); int data_size = num_histories * sizeof(float); data_file.read((char*)t_in_1_h, data_size); data_file.read((char*)t_in_2_h, data_size); data_file.read((char*)t_out_1_h, data_size); data_file.read((char*)t_out_2_h, data_size); data_file.read((char*)v_in_1_h, data_size); data_file.read((char*)v_in_2_h, data_size); data_file.read((char*)v_out_1_h, data_size); data_file.read((char*)v_out_2_h, data_size); data_file.read((char*)u_in_1_h, data_size); data_file.read((char*)u_in_2_h, data_size); data_file.read((char*)u_out_1_h, data_size); data_file.read((char*)u_out_2_h, data_size); data_file.read((char*)WEPL_h, data_size); //float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data; for( int i = 0; i < num_histories; i++ ) { if( DATA_IN_MM ) { // Convert the input data from mm to cm v_in_1_h[i] *= 0.1; v_in_2_h[i] *= 0.1; v_out_1_h[i] *= 0.1; v_out_2_h[i] *= 0.1; t_in_1_h[i] *= 0.1; t_in_2_h[i] *= 0.1; t_out_1_h[i] *= 0.1; t_out_2_h[i] *= 0.1; WEPL_h[i] *= 0.1; if( WEPL_h[i] < 0 ) printf("WEPL[%d] = %3f\n", i, WEPL_h[i] ); u_in_1_h[i] *= 0.1; u_in_2_h[i] *= 
0.1; u_out_1_h[i] *= 0.1; u_out_2_h[i] *= 0.1; } gantry_angle_h[i] = int(projection_angle); } data_file.close(); } } } void iterative_data_read_v1( int num_histories, int start_file_num, int end_file_num ){ unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; t_in_1_h = (float*) malloc(mem_size_hist_floats); t_in_2_h = (float*) malloc(mem_size_hist_floats); t_out_1_h = (float*) malloc(mem_size_hist_floats); t_out_2_h = (float*) malloc(mem_size_hist_floats); u_in_1_h = (float*) malloc(mem_size_hist_floats); u_in_2_h = (float*) malloc(mem_size_hist_floats); u_out_1_h = (float*) malloc(mem_size_hist_floats); u_out_2_h = (float*) malloc(mem_size_hist_floats); v_in_1_h = (float*) malloc(mem_size_hist_floats); v_in_2_h = (float*) malloc(mem_size_hist_floats); v_out_1_h = (float*) malloc(mem_size_hist_floats); v_out_2_h = (float*) malloc(mem_size_hist_floats); WEPL_h = (float*) malloc(mem_size_hist_floats); gantry_angle_h = (int*) malloc(mem_size_hist_ints); /* Contains the following headers: Magic number identifier: "PCTD" (4-byte string) Format version identifier (integer) Number of events in file (integer) Projection angle (float | degrees) Beam energy (float | MeV) Acquisition/generation date (integer | Unix time) Pre-process date (integer | Unix time) Phantom name or description (variable length string) Data source (variable length string) Prepared by (variable length string) * Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string. Event data: Data is be stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory. 
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system: t0 (float * N) t1 (float * N) t2 (float * N) t3 (float * N) v0 (float * N) v1 (float * N) v2 (float * N) v3 (float * N) u0 (float * N) u1 (float * N) u2 (float * N) u3 (float * N) WEPL in mm (float * N) */ char user_response[20]; char data_filename[128]; int array_index = 0; float min_WEPL = 20, max_WEPL = -20; for( int file_num = start_file_num; file_num <= end_file_num; file_num++ ) { int gantry_position = file_num / NUM_SCANS; int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL; int scan_number = file_num % NUM_SCANS + 1; int scan_histories = histories_per_file[file_num]; printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number ); sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); ifstream data_file(data_filename, ios::binary); if( data_file == NULL ) { fputs( "File not found: Check that the directories and files are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } char magic_number[5]; data_file.read(magic_number, 4); magic_number[4] = '\0'; if( strcmp(magic_number, "PCTD") ) { puts("Error: unknown file type (should be PCTD)!\n"); fgets(user_response, sizeof(user_response), stdin); exit(1); } int version_id; data_file.read((char*)&version_id, sizeof(int)); if( version_id == 0 ) { int num_histories; data_file.read((char*)&num_histories, sizeof(int)); puts("Reading headers from file...\n"); float projection_angle, beam_energy; int generation_date, preprocess_date; int phantom_name_size, data_source_size, prepared_by_size; char *phantom_name, *data_source, *prepared_by; data_file.read((char*)&projection_angle, sizeof(float)); data_file.read((char*)&beam_energy, sizeof(float)); data_file.read((char*)&generation_date, sizeof(int)); data_file.read((char*)&preprocess_date, sizeof(int)); data_file.read((char*)&phantom_name_size, sizeof(int)); phantom_name = (char*)malloc(phantom_name_size); data_file.read(phantom_name, phantom_name_size); data_file.read((char*)&data_source_size, sizeof(int)); data_source = (char*)malloc(data_source_size); data_file.read(data_source, data_source_size); data_file.read((char*)&prepared_by_size, sizeof(int)); prepared_by = (char*)malloc(prepared_by_size); data_file.read(prepared_by, prepared_by_size); printf("Loading %d histories from file\n", num_histories); int data_size = num_histories * sizeof(float); data_file.read((char*)t_in_1_h, data_size); data_file.read((char*)t_in_2_h, data_size); data_file.read((char*)t_out_1_h, data_size); data_file.read((char*)t_out_2_h, data_size); data_file.read((char*)v_in_1_h, data_size); data_file.read((char*)v_in_2_h, data_size); data_file.read((char*)v_out_1_h, data_size); data_file.read((char*)v_out_2_h, data_size); data_file.read((char*)u_in_1_h, data_size); data_file.read((char*)u_in_2_h, data_size); data_file.read((char*)u_out_1_h, data_size); data_file.read((char*)u_out_2_h, data_size); data_file.read((char*)WEPL_h, data_size); //float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data; for( int i = 0; i < num_histories; i++ ) { if( DATA_IN_MM ) { // Convert the input data from mm to cm v_in_1_h[i] *= 0.1; v_in_2_h[i] *= 0.1; v_out_1_h[i] *= 0.1; v_out_2_h[i] *= 0.1; t_in_1_h[i] *= 0.1; t_in_2_h[i] *= 0.1; t_out_1_h[i] *= 0.1; t_out_2_h[i] *= 0.1; WEPL_h[i] *= 0.1; if( WEPL_h[i] < 0 ) printf("WEPL[%d] = %3f\n", i, WEPL_h[i] ); u_in_1_h[i] *= 0.1; u_in_2_h[i] *= 
0.1; u_out_1_h[i] *= 0.1; u_out_2_h[i] *= 0.1; } gantry_angle_h[i] = int(projection_angle); } data_file.close(); } } } void recon_volume_intersections( int num_histories ) { //printf("There are %d histories in this projection\n", num_histories ); unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; unsigned int mem_size_hist_bool = sizeof(bool) * num_histories; // Allocate GPU memory hipMalloc((void**) &t_in_1_d, mem_size_hist_floats); hipMalloc((void**) &t_in_2_d, mem_size_hist_floats); hipMalloc((void**) &t_out_1_d, mem_size_hist_floats); hipMalloc((void**) &t_out_2_d, mem_size_hist_floats); hipMalloc((void**) &u_in_1_d, mem_size_hist_floats); hipMalloc((void**) &u_in_2_d, mem_size_hist_floats); hipMalloc((void**) &u_out_1_d, mem_size_hist_floats); hipMalloc((void**) &u_out_2_d, mem_size_hist_floats); hipMalloc((void**) &v_in_1_d, mem_size_hist_floats); hipMalloc((void**) &v_in_2_d, mem_size_hist_floats); hipMalloc((void**) &v_out_1_d, mem_size_hist_floats); hipMalloc((void**) &v_out_2_d, mem_size_hist_floats); hipMalloc((void**) &WEPL_d, mem_size_hist_floats); hipMalloc((void**) &gantry_angle_d, mem_size_hist_ints); hipMalloc((void**) &x_entry_d, mem_size_hist_floats); hipMalloc((void**) &y_entry_d, mem_size_hist_floats); hipMalloc((void**) &z_entry_d, mem_size_hist_floats); hipMalloc((void**) &x_exit_d, mem_size_hist_floats); hipMalloc((void**) &y_exit_d, mem_size_hist_floats); hipMalloc((void**) &z_exit_d, mem_size_hist_floats); hipMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats); hipMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats); hipMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats); hipMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats); hipMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats); hipMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats); hipMalloc((void**) &traversed_recon_volume_d, mem_size_hist_bool); hipMemcpy(t_in_1_d, t_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(t_in_2_d, t_in_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(t_out_1_d, t_out_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(t_out_2_d, t_out_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(u_in_1_d, u_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(u_in_2_d, u_in_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(u_out_1_d, u_out_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(u_out_2_d, u_out_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(v_in_1_d, v_in_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(v_in_2_d, v_in_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(v_out_1_d, v_out_1_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(v_out_2_d, v_out_2_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; hipMemcpy(gantry_angle_d, gantry_angle_h, mem_size_hist_ints, hipMemcpyHostToDevice) ; hipMemcpy(WEPL_d, WEPL_h, mem_size_hist_floats, hipMemcpyHostToDevice) ; dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); hipLaunchKernelGGL(( recon_volume_intersections_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, num_histories, gantry_angle_d, traversed_recon_volume_d, WEPL_d, t_in_1_d, t_in_2_d, t_out_1_d, t_out_2_d, u_in_1_d, u_in_2_d, u_out_1_d, u_out_2_d, v_in_1_d, v_in_2_d, v_out_1_d, v_out_2_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d, xy_entry_angle_d, xz_entry_angle_d, 
xy_exit_angle_d, xz_exit_angle_d, relative_ut_angle_d, relative_uv_angle_d ); free(t_in_1_h); free(t_in_2_h); free(v_in_1_h); free(v_in_2_h); free(u_in_1_h); free(u_in_2_h); free(t_out_1_h); free(t_out_2_h); free(v_out_1_h); free(v_out_2_h); free(u_out_1_h); free(u_out_2_h); hipFree(t_in_1_d); hipFree(t_in_2_d); hipFree(v_in_1_d); hipFree(v_in_2_d); hipFree(u_in_1_d); hipFree(u_in_2_d); hipFree(t_out_1_d); hipFree(t_out_2_d); hipFree(v_out_1_d); hipFree(v_out_2_d); hipFree(u_out_1_d); hipFree(u_out_2_d); hipFree(gantry_angle_d); } __global__ void recon_volume_intersections_kernel ( int num_histories, int* gantry_angle, bool* traversed_recon_volume, float* WEPL, float* t_in_1, float* t_in_2, float* t_out_1, float* t_out_2, float* u_in_1, float* u_in_2, float* u_out_1, float* u_out_2, float* v_in_1, float* v_in_2, float* v_out_1, float* v_out_2, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* relative_ut_angle, float* relative_uv_angle ) { /* Determine if the proton path passes through the reconstruction volume (i.e. intersects the reconstruction cylinder twice) and if it does, determine the x, y, and z positions in the global/object coordinate system where the proton enters and exits the reconstruction volume. The origin of the object coordinate system is defined to be at the center of the reconstruction cylinder so that its volume is bounded by: -RECON_CYL_RADIUS <= x <= RECON_CYL_RADIUS -RECON_CYL_RADIUS <= y <= RECON_CYL_RADIUS -RECON_CYL_HEIGHT/2 <= z <= RECON_CYL_HEIGHT/2 First, the coordinates of the points where the proton path intersected the entry/exit detectors must be calculated. Since the detectors records data in the detector coordinate system, data in the utv coordinate system must be converted into the global/object coordinate system. The coordinate transformation can be accomplished using a rotation matrix with an angle of rotation determined by the angle between the two coordinate systems, which is the gantry_angle, in this case: Rotate ut-coordinate system to xy-coordinate system x = cos( gantry_angle ) * u - sin( gantry_angle ) * t y = sin( gantry_angle ) * u + cos( gantry_angle ) * t Rotate xy-coordinate system to ut-coordinate system u = cos( gantry_angle ) * x + sin( gantry_angle ) * y t = cos( gantry_angle ) * y - sin( gantry_angle ) * x If a proton passes through the reconstruction volume, then the line defining its path in the xy-plane will intersect the circle defining the boundary of the reconstruction cylinder in the xy-plane twice. We can determine if the proton path passes through the reconstruction volume by equating the equations of the proton path and the circle. This produces a second order polynomial which we must solve: f(x)_proton = f(x)_cylinder mx+b = sqrt(r^2 - x^2) m^2x^2 + 2mbx + b^2 = r^2 - x^2 (m^2 + 1)x^2 + 2mbx + (b^2 - r^2) = 0 ax^2 + bx + c = 0 => a = m^2 + 1 b = 2mb c = b^2 - r^2 We can solve this using the quadratic formula ([-b +/- sqrt(b^2-4ac)]/2a). If the proton passed through the reconstruction volume, then the determinant will be greater than zero ( b^2-4ac > 0 ) and the quadratic formula will return two unique points of intersection. 
The intersection point closest to where the proton entry/exit path intersects the entry/exit detector plane is calculated and The proton entry/exit path If the determinant <= 0, then the proton path does not go through the reconstruction volume and we need not determine intersection coordinates. Two points are returned by the quadratic formula for each reconstruction cylinder intersection, the coordinates closest to the point where the entry/exit path intersected the detector plane are determined If the exit/entry path travels through the cone bounded by y=|x| && y=-|x| the x_coordinates will be small and the difference between the entry and exit x-coordinates will approach zero, causing instabilities in trig functions and slope calculations ( x difference in denominator). To overcome these innaccurate calculations, coordinates for these proton paths will be rotated PI/2 radians(90 degrees) prior to calculations and rotated back when they are completed using a rotation matrix transformation again: Positive Rotation By 90 Degrees x' = cos( 90 ) * x - sin( 90 ) * y = -y y' = sin( 90 ) * x + cos( 90 ) * y = x Negative Rotation By 90 Degree x' = cos( 90 ) * x + sin( 90 ) * y = y y' = cos( 90 ) * y - sin( 90 ) * x = -x */ float a = 0, b = 0, c = 0; float x_intercept_1, x_intercept_2, y_intercept_1, y_intercept_2, squared_distance_1, squared_distance_2; float x_temp, y_temp; int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; float rotation_angle_radians = gantry_angle[i] * ANGLE_TO_RADIANS; traversed_recon_volume[i] = false; if( i < num_histories ) { /***************************************************************************************************************/ /**************************************** Check entry information **********************************************/ /***************************************************************************************************************/ // Determine if the proton path enters the reconstruction volume. The proton path is defined using the entry angle and // position where the proton intersected the entry SSD which is closest to the object. If this line projected onto the // xy plane intersects the reconstruction cylinder, the line will intersect the circle in the xy plane which describes the // boundary of the reconstruction cylinder twice and its entry elevation will be within the height of the cylinder. // Relevant angles in radians: gantry angle, proton path entry angle in ut and xy planes. 
float ut_entry_angle = atan2f( t_in_2[i] - t_in_1[i], u_in_2[i] - u_in_1[i] ); xy_entry_angle[i] = ut_entry_angle + rotation_angle_radians; if( xy_entry_angle[i] < 0 ) xy_entry_angle[i] += TWO_PI; // Rotate entry detector positions float x_in = ( cosf( rotation_angle_radians ) * u_in_2[i] ) - ( sinf( rotation_angle_radians ) * t_in_2[i] ); float y_in = ( sinf( rotation_angle_radians ) * u_in_2[i] ) + ( cosf( rotation_angle_radians ) * t_in_2[i] ); // Determine if entry points should be rotated bool entry_in_cone = ( (xy_entry_angle[i] > PI_OVER_4) && (xy_entry_angle[i] < THREE_PI_OVER_4) ) || ( (xy_entry_angle[i] > FIVE_PI_OVER_4) && (xy_entry_angle[i] < SEVEN_PI_OVER_4) ); // Rotate x_in & y_in by 90 degrees, if necessary if( entry_in_cone ) { x_temp = x_in; y_temp = y_in; x_in = -y_temp; y_in = x_temp; xy_entry_angle[i] += PI_OVER_2; } float m_in = tanf( xy_entry_angle[i] ); // proton entry path slope float b_in = y_in - m_in * x_in; // proton entry path y-intercept // Quadratic formula coefficients a = 1 + pow(m_in, 2); // x^2 coefficient b = 2 * m_in * b_in; // x coefficient c = pow(b_in, 2) - pow(RECON_CYL_RADIUS, 2 ); // 1 coefficient float entry_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant bool entered = ( entry_discriminant > 0 ); // Proton path intersected twice // Find both intersection points of the circle; closest one to the entry SSDs is the entry position // Notice that x_intercept_2 = ( -b - sqrt(...) ) / ( 2 * a ) has the negative sign pulled out and following calculations modified as necessary // e.g. x_intercept_2 = -x_real_2 // y_intercept_2 = -y_real_2 // squared_distance_2 = sqd_real_2 since (x_intercept_2 + x_in)^2 = (-x_intercept_2 - x_in)^2 = (x_real_2 - x_in)^2 (same for y term) // This negation is also considered when assigning x_entry/y_entry using -x_intercept_2/y_intercept_2 *(TRUE/FALSE = 1/0) if( entered ) { x_intercept_1 = ( sqrtf(entry_discriminant) - b ) / ( 2 * a ); x_intercept_2 = ( sqrtf(entry_discriminant) + b ) / ( 2 * a ); y_intercept_1 = m_in * x_intercept_1 + b_in; y_intercept_2 = m_in * x_intercept_2 - b_in; squared_distance_1 = pow(x_intercept_1 - x_in, 2) + pow(y_intercept_1 - y_in, 2); squared_distance_2 = pow(x_intercept_2 + x_in, 2) + pow(y_intercept_2 + y_in, 2); x_entry[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2); y_entry[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2); } // Unrotate by 90 degrees, if necessary if( entry_in_cone ) { x_temp = x_entry[i]; y_temp = y_entry[i]; x_entry[i] = y_temp; y_entry[i] = -x_temp; xy_entry_angle[i] -= PI_OVER_2; } /***************************************************************************************************************/ /****************************************** Check exit information *********************************************/ /***************************************************************************************************************/ // Repeat the procedure above, this time to determine if the proton path exited the reconstruction volume and if so, the // x,y,z position where it exited float ut_exit_angle = atan2f( t_out_2[i] - t_out_1[i], u_out_2[i] - u_out_1[i] ); xy_exit_angle[i] = ut_exit_angle + rotation_angle_radians; if( xy_exit_angle[i] < 0 ) xy_exit_angle[i] += TWO_PI; // Rotate exit detector positions float x_out = ( cosf(rotation_angle_radians) * u_out_1[i] ) - ( sinf(rotation_angle_radians) * 
		t_out_1[i] );
		float y_out = ( sinf(rotation_angle_radians) * u_out_1[i] ) + ( cosf(rotation_angle_radians) * t_out_1[i] );

		// Determine if exit points should be rotated
		bool exit_in_cone = ( (xy_exit_angle[i] > PI_OVER_4) && (xy_exit_angle[i] < THREE_PI_OVER_4) ) || ( (xy_exit_angle[i] > FIVE_PI_OVER_4) && (xy_exit_angle[i] < SEVEN_PI_OVER_4) );

		// Rotate x_out & y_out by 90 degrees, if necessary
		if( exit_in_cone )
		{
			x_temp = x_out;
			y_temp = y_out;
			x_out = -y_temp;
			y_out = x_temp;
			xy_exit_angle[i] += PI_OVER_2;
		}

		float m_out = tanf( xy_exit_angle[i] );	// proton exit path slope
		float b_out = y_out - m_out * x_out;	// proton exit path y-intercept

		// Quadratic formula coefficients
		a = 1 + pow(m_out, 2);								// x^2 coefficient
		b = 2 * m_out * b_out;								// x coefficient
		c = pow(b_out, 2) - pow(RECON_CYL_RADIUS, 2);		// 1 coefficient
		float exit_discriminant = pow(b, 2) - (4 * a * c);	// Quadratic formula discriminant
		bool exited = ( exit_discriminant > 0 );			// Proton path intersected twice

		// Find both intersection points of the circle; closest one to the exit SSDs is the exit position
		if( exited )
		{
			x_intercept_1 = ( sqrtf(exit_discriminant) - b ) / ( 2 * a );
			x_intercept_2 = ( sqrtf(exit_discriminant) + b ) / ( 2 * a );// -x calculated
			y_intercept_1 = m_out * x_intercept_1 + b_out;
			y_intercept_2 = m_out * x_intercept_2 - b_out;// -y calculated
			squared_distance_1 = pow(x_intercept_1 - x_out, 2) + pow(y_intercept_1 - y_out, 2);
			squared_distance_2 = pow(x_intercept_2 + x_out, 2) + pow(y_intercept_2 + y_out, 2);// modified due to -x and -y calcs above
			x_exit[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2);
			y_exit[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2);
		}

		// Unrotate by 90 degrees, if necessary
		if( exit_in_cone )
		{
			x_temp = x_exit[i];
			y_temp = y_exit[i];
			x_exit[i] = y_temp;
			y_exit[i] = -x_temp;
			xy_exit_angle[i] -= PI_OVER_2;
		}
		/***************************************************************************************************************/
		/***************************************** Check z(v) direction ************************************************/
		/***************************************************************************************************************/

		// Relevant angles/slopes in radians for entry and exit in the uv plane
		float uv_entry_slope = ( v_in_2[i] - v_in_1[i] ) / ( u_in_2[i] - u_in_1[i] );
		float uv_exit_slope = ( v_out_2[i] - v_out_1[i] ) / ( u_out_2[i] - u_out_1[i] );

		float uv_entry_angle = atan2( v_in_2[i] - v_in_1[i], u_in_2[i] - u_in_1[i] );
		float uv_exit_angle = atan2( v_out_2[i] - v_out_1[i], u_out_2[i] - u_out_1[i] );

		xz_entry_angle[i] = uv_entry_angle;
		xz_exit_angle[i] = uv_exit_angle;
		if( xz_entry_angle[i] < 0 )
			xz_entry_angle[i] += TWO_PI;
		if( xz_exit_angle[i] < 0 )
			xz_exit_angle[i] += TWO_PI;

		// Calculate the u coordinate for the entry and exit points of the reconstruction volume and then use the uv slope calculated
		// from the detector entry and exit positions to determine the z position of the proton as it entered and exited the
		// reconstruction volume
		/*
			u-coordinate of the entry and exit points of the reconstruction cylinder can be found using an inverse rotation

				u = cos( gantry_angle ) * x + sin( gantry_angle ) * y
		*/
		float u_entry = ( cosf( rotation_angle_radians ) * x_entry[i] ) + ( sinf( rotation_angle_radians ) * y_entry[i] );
		float u_exit = ( cosf(rotation_angle_radians) * x_exit[i] ) + (
sinf(rotation_angle_radians) * y_exit[i] ); z_entry[i] = v_in_2[i] + uv_entry_slope * ( u_entry - u_in_2[i] ); z_exit[i] = v_out_1[i] - uv_exit_slope * ( u_out_1[i] - u_exit ); // Even if the proton path intersected the circle describing the boundary of the cylinder twice, it may not have actually // passed through the reconstruction volume or may have only passed through part way. If |z_entry|> RECON_CYL_HEIGHT/2 , // then something off happened since the the source is around z=0 and we do not want to use this history. If the // |z_entry| < RECON_CYL_HEIGHT/2 and |z_exit| > RECON_CYL_HEIGHT/2 then we want to use the history but the x_exit and // y_exit positions need to be calculated again based on how far through the cylinder the proton passed before exiting it if( entered && exited ) { if( ( fabs(z_entry[i]) <= RECON_CYL_HEIGHT * 0.5 ) && ( fabs(z_exit[i]) > RECON_CYL_HEIGHT * 0.5 ) ) { float recon_cyl_fraction = fabs( ( ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5 - z_entry[i] ) / ( z_exit[i] - z_entry[i] ) ); x_exit[i] = x_entry[i] + recon_cyl_fraction * ( x_exit[i] - x_entry[i] ); y_exit[i] = y_entry[i] + recon_cyl_fraction * ( y_exit[i] - y_entry[i] ); z_exit[i] = ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5; } else if( fabs(z_entry[i]) > RECON_CYL_HEIGHT * 0.5 ) { entered = false; exited = false; } // Check the measurement locations. Do not allow more than 5 cm difference in entry and exit in t and v. This gets // rid of spurious events. if( ( fabs(t_out_1[i] - t_in_2[i]) > 5 ) || ( fabs(v_out_1[i] - v_in_2[i]) > 5 ) ) { entered = false; exited = false; } } relative_ut_angle[i] = ut_exit_angle - ut_entry_angle; relative_uv_angle[i] = uv_exit_angle - uv_entry_angle; // Proton passed through the reconstruction volume only if it both entered and exited the reconstruction cylinder traversed_recon_volume[i] = entered && exited; } } void bin_valid_histories( int num_histories ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; unsigned int mem_size_hist_bool = sizeof(bool) * num_histories; traversed_recon_volume_h = (bool*) calloc( num_histories, sizeof(bool) ); bin_num_h = (int*) calloc( num_histories, sizeof(int) ); x_entry_h = (float*) calloc( num_histories, sizeof(float) ); y_entry_h = (float*) calloc( num_histories, sizeof(float) ); z_entry_h = (float*) calloc( num_histories, sizeof(float) ); x_exit_h = (float*) calloc( num_histories, sizeof(float) ); y_exit_h = (float*) calloc( num_histories, sizeof(float) ); z_exit_h = (float*) calloc( num_histories, sizeof(float) ); xy_entry_angle_h = (float*) calloc( num_histories, sizeof(float) ); xz_entry_angle_h = (float*) calloc( num_histories, sizeof(float) ); xy_exit_angle_h = (float*) calloc( num_histories, sizeof(float) ); xz_exit_angle_h = (float*) calloc( num_histories, sizeof(float) ); relative_ut_angle_h = (float*) calloc( num_histories, sizeof(float) ); relative_uv_angle_h = (float*) calloc( num_histories, sizeof(float) ); hipMalloc((void**) &bin_num_d, mem_size_hist_ints ); hipMemcpy( bin_num_d, bin_num_h, mem_size_hist_ints, hipMemcpyHostToDevice ); dim3 dimBlock( THREADS_PER_BLOCK ); dim3 dimGrid( (int)( num_histories/THREADS_PER_BLOCK ) + 1 ); hipLaunchKernelGGL(( bin_valid_histories_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, num_histories, bin_counts_d, bin_num_d, traversed_recon_volume_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, 
WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d, relative_ut_angle_d, relative_uv_angle_d ); hipMemcpy( traversed_recon_volume_h, traversed_recon_volume_d, mem_size_hist_bool, hipMemcpyDeviceToHost ); hipMemcpy( bin_num_h, bin_num_d, mem_size_hist_ints, hipMemcpyDeviceToHost ); hipMemcpy( x_entry_h, x_entry_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( y_entry_h, y_entry_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( z_entry_h, z_entry_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( x_exit_h, x_exit_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( y_exit_h, y_exit_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( z_exit_h, z_exit_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( xy_entry_angle_h, xy_entry_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( xz_entry_angle_h, xz_entry_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( xy_exit_angle_h, xy_exit_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( xz_exit_angle_h, xz_exit_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( relative_ut_angle_h, relative_ut_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); hipMemcpy( relative_uv_angle_h, relative_uv_angle_d, mem_size_hist_floats, hipMemcpyDeviceToHost ); int offset = 0; for( int i = 0; i < num_histories; i++ ) { if( traversed_recon_volume_h[i] && ( bin_num_h[i] >= 0 ) ) { bin_num_vector.push_back( bin_num_h[i] ); //gantry_angle_vector.push_back( gantry_angle_h[i] ); WEPL_vector.push_back( WEPL_h[i] ); x_entry_vector.push_back( x_entry_h[i] ); y_entry_vector.push_back( y_entry_h[i] ); z_entry_vector.push_back( z_entry_h[i] ); x_exit_vector.push_back( x_exit_h[i] ); y_exit_vector.push_back( y_exit_h[i] ); z_exit_vector.push_back( z_exit_h[i] ); xy_entry_angle_vector.push_back( xy_entry_angle_h[i] ); xz_entry_angle_vector.push_back( xz_entry_angle_h[i] ); //xy_exit_angle_vector.push_back( xy_exit_angle_h[i] ); //xz_exit_angle_vector.push_back( xz_exit_angle_h[i] ); relative_ut_angle_vector.push_back( relative_ut_angle_h[i] ); relative_uv_angle_vector.push_back( relative_uv_angle_h[i] ); offset++; recon_vol_histories++; } } printf( "%d out of %d histories passed intersection cuts this iteration\n", offset, num_histories ); free( traversed_recon_volume_h ); free( bin_num_h ); free( x_entry_h ); free( y_entry_h ); free( z_entry_h ); free( x_exit_h ); free( y_exit_h ); free( z_exit_h ); free( xy_entry_angle_h ); free( xz_entry_angle_h ); free( xy_exit_angle_h ); free( xz_exit_angle_h ); free( relative_ut_angle_h ); free( relative_uv_angle_h ); //hipFree( bin_num_d ); hipFree( xy_entry_angle_d ); hipFree( xz_entry_angle_d ); hipFree( xy_exit_angle_d ); hipFree( xz_exit_angle_d ); hipFree( relative_ut_angle_d ); hipFree( relative_uv_angle_d ); } __global__ void bin_valid_histories_kernel ( int num_histories, int* bin_counts, int* bin_num, bool* traversed_recon_volume, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* relative_ut_angle, float* relative_uv_angle ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( i < num_histories ) { if( traversed_recon_volume[i] ) { float x_midpath, y_midpath, z_midpath, path_angle; int angle_bin, t_bin, v_bin; float angle, t, v; x_midpath = ( x_entry[i] + 
x_exit[i] ) / 2; y_midpath = ( y_entry[i] + y_exit[i] ) / 2; z_midpath = ( z_entry[i] + z_exit[i] ) / 2; path_angle = atan2( ( y_exit[i] - y_entry[i] ) , ( x_exit[i] - x_entry[i] ) ); if( path_angle < 0 ) path_angle += 2*PI; angle_bin = int( ( path_angle * RADIANS_TO_ANGLE / ANGULAR_BIN_SIZE ) + 0.5) % ANGULAR_BINS; angle = angle_bin * ANGULAR_BIN_SIZE * ANGLE_TO_RADIANS; t = y_midpath * cosf(angle) - x_midpath * sinf(angle); t_bin = int( (t / T_BIN_SIZE ) + T_BINS/2); v = z_midpath; v_bin = int( (v / V_BIN_SIZE ) + V_BINS/2); if( (t_bin >= 0) && (v_bin >= 0) && (t_bin < T_BINS) && (v_bin < V_BINS) ) { bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS; atomicAdd( &bin_counts[bin_num[i]], 1 ); atomicAdd( &mean_WEPL[bin_num[i]], WEPL[i] ); atomicAdd( &mean_rel_ut_angle[bin_num[i]], relative_ut_angle[i] ); atomicAdd( &mean_rel_uv_angle[bin_num[i]], relative_uv_angle[i] ); } else bin_num[i] = -1; } } } /************************************************************************************************************************************************************/ /*************************************************************** Statistical Analysis and Cuts **************************************************************/ /************************************************************************************************************************************************************/ void calculate_means() { dim3 dimBlock( T_BINS ); dim3 dimGrid( V_BINS, ANGULAR_BINS ); hipLaunchKernelGGL(( calculate_means_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, bin_counts_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d ); //hipMemcpy( bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, hipMemcpyDeviceToHost ); //hipMemcpy( mean_WEPL_h, mean_WEPL_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost ); //hipMemcpy( mean_rel_ut_angle_h, mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost ); //hipMemcpy( mean_rel_uv_angle_h, mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost ); //write_integer_array_to_file("bin_counts_h_pre", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS ); //write_float_array_to_file("mean_WEPL_h", output_directory, output_folder, mean_WEPL_h, T_BINS, ANGULAR_BINS, V_BINS ); //write_float_array_to_file("mean_rel_ut_angle_h", output_directory, output_folder, mean_rel_ut_angle_h, T_BINS, ANGULAR_BINS, V_BINS ); //write_float_array_to_file("mean_rel_uv_angle_h", output_directory, output_folder, mean_rel_uv_angle_h, T_BINS, ANGULAR_BINS, V_BINS ); free(bin_counts_h); free(mean_WEPL_h); free(mean_rel_ut_angle_h); free(mean_rel_uv_angle_h); } __global__ void calculate_means_kernel( int* bin_counts, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle ) { int v = blockIdx.x; int angle = blockIdx.y; int t = threadIdx.x; int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS; if( bin_counts[bin] > 0 ) { mean_WEPL[bin] /= bin_counts[bin]; mean_rel_ut_angle[bin] /= bin_counts[bin]; mean_rel_uv_angle[bin] /= bin_counts[bin]; } } void sum_differences( int start_position, int num_histories ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; hipMalloc((void**) &bin_num_d, mem_size_hist_ints); hipMalloc((void**) &WEPL_d, mem_size_hist_floats); hipMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats); hipMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats); hipMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats); hipMalloc((void**) 
&xz_exit_angle_d, mem_size_hist_floats); //hipMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats); //hipMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats); hipMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats); hipMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats); hipMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, hipMemcpyHostToDevice); hipMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); hipMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); hipMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); //hipMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); //hipMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); hipMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); hipMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice); dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); hipLaunchKernelGGL(( sum_differences_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, num_histories, bin_num_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d, relative_ut_angle_d, relative_uv_angle_d ); hipFree( bin_num_d ); hipFree( WEPL_d ); hipFree( xy_entry_angle_d ); hipFree( xz_entry_angle_d ); //hipFree( xy_exit_angle_d ); //hipFree( xz_exit_angle_d ); hipFree( relative_ut_angle_d ); hipFree( relative_uv_angle_d ); } __global__ void sum_differences_kernel ( int num_histories, int* bin_num, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle, float* relative_ut_angle, float* relative_uv_angle ) { float WEPL_difference, rel_ut_angle_difference, rel_uv_angle_difference; int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( i < num_histories ) { /* float ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; if( fabs(ut_diff) > PI ) { printf("Hello\n"); if( xy_entry_angle[i] > PI ) xy_entry_angle[i] -= TWO_PI; if( xy_exit_angle[i] > PI ) xy_exit_angle[i] -= TWO_PI; ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; } float uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; if( fabs(uv_diff) > PI ) { if( xz_entry_angle[i] > PI ) xz_entry_angle[i] -= TWO_PI; if( xz_exit_angle[i] > PI ) xz_exit_angle[i] -= TWO_PI; uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; }*/ WEPL_difference = WEPL[i] - mean_WEPL[bin_num[i]]; rel_ut_angle_difference = relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]]; rel_uv_angle_difference = relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]]; //rel_ut_angle_difference = ut_diff - mean_rel_ut_angle[bin_num[i]]; //rel_uv_angle_difference = uv_diff - mean_rel_uv_angle[bin_num[i]]; atomicAdd( &stddev_WEPL[bin_num[i]], WEPL_difference * WEPL_difference); atomicAdd( &stddev_rel_ut_angle[bin_num[i]], rel_ut_angle_difference * rel_ut_angle_difference ); atomicAdd( &stddev_rel_uv_angle[bin_num[i]], rel_uv_angle_difference * rel_uv_angle_difference ); } } 
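/*
	Minimal host-side sketch (added for illustration; not called anywhere in this file) of the statistic that
	sum_differences_kernel and calculate_std_devs_kernel compute on the GPU: squared differences from the per-bin
	mean are accumulated, then divided by ( N - SAMPLE_STD_DEV ) and square-rooted. The function name and signature
	below are hypothetical; it only assumes the sqrtf already used elsewhere in this file.
*/
static float sample_std_dev_sketch( const float* values, int num_values, float mean )
{
	float sum_squared_differences = 0.0f;
	for( int i = 0; i < num_values; i++ )
	{
		float difference = values[i] - mean;				// same difference formed per history on the GPU
		sum_squared_differences += difference * difference;	// accumulated there with atomicAdd
	}
	// With SAMPLE_STD_DEV = 1 this is the ( N - 1 ) sample estimator used by calculate_std_devs_kernel
	return ( num_values > 1 ) ? sqrtf( sum_squared_differences / ( num_values - 1 ) ) : 0.0f;
}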
void calculate_std_devs()
{
	dim3 dimBlock( T_BINS );
	dim3 dimGrid( V_BINS, ANGULAR_BINS );
	hipLaunchKernelGGL(( calculate_std_devs_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, bin_counts_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d );
	//hipFree( bin_counts_d );
}
__global__ void calculate_std_devs_kernel( int* bin_counts, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle )
{
	int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
	int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
	if( bin_counts[bin] > 0 )
	{
		// SAMPLE_STD_DEV = true/false = 1/0 => std_dev = sqrt( SUM{i = 1 -> N} ( mu - x_i )^2 / ( N - SAMPLE_STD_DEV ) )
		stddev_WEPL[bin] = sqrtf( stddev_WEPL[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
		stddev_rel_ut_angle[bin] = sqrtf( stddev_rel_ut_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
		stddev_rel_uv_angle[bin] = sqrtf( stddev_rel_uv_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
	}
	__syncthreads();
	bin_counts[bin] = 0;
}
void statistical_cuts( int start_position, int num_histories )
{
	unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
	unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
	unsigned int mem_size_hist_bools = sizeof(bool) * num_histories;

	passed_cuts_h = (bool*) calloc (num_histories, sizeof(bool) );

	hipMalloc( (void**) &bin_num_d, mem_size_hist_ints );
	hipMalloc( (void**) &WEPL_d, mem_size_hist_floats );
	hipMalloc( (void**) &xy_entry_angle_d, mem_size_hist_floats );
	hipMalloc( (void**) &xz_entry_angle_d, mem_size_hist_floats );
	//hipMalloc( (void**) &xy_exit_angle_d, mem_size_hist_floats );
	//hipMalloc( (void**) &xz_exit_angle_d, mem_size_hist_floats );
	hipMalloc( (void**) &relative_ut_angle_d, mem_size_hist_floats );
	hipMalloc( (void**) &relative_uv_angle_d, mem_size_hist_floats );
	hipMalloc( (void**) &passed_cuts_d, mem_size_hist_bools );

	hipMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, hipMemcpyHostToDevice );
	hipMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	hipMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	hipMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	//hipMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	//hipMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	hipMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	hipMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, hipMemcpyHostToDevice );
	hipMemcpy( passed_cuts_d, passed_cuts_h, mem_size_hist_bools, hipMemcpyHostToDevice );

	//puts("Before kernel");
	dim3 dimBlock(THREADS_PER_BLOCK);
	dim3 dimGrid( int( num_histories / THREADS_PER_BLOCK ) + 1 );
	hipLaunchKernelGGL(( statistical_cuts_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, num_histories, bin_counts_d, bin_num_d, sinogram_d, WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d,
		mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d, passed_cuts_d, relative_ut_angle_d, relative_uv_angle_d );
	//puts("After kernel");
	hipMemcpy( passed_cuts_h, passed_cuts_d, mem_size_hist_bools, hipMemcpyDeviceToHost);
	//printf("start iteration %d\n",
iteration ); for( int i = 0; i < num_histories; i++ ) { if( passed_cuts_h[i] ) { //printf("start i = %d\n", i ); //printf("index = %d\n", start_position + i ); bin_num_vector[post_cut_histories] = bin_num_vector[start_position + i]; //gantry_angle_vector[post_cut_histories] = gantry_angle_vector[start_position + i]; WEPL_vector[post_cut_histories] = WEPL_vector[start_position + i]; x_entry_vector[post_cut_histories] = x_entry_vector[start_position + i]; y_entry_vector[post_cut_histories] = y_entry_vector[start_position + i]; z_entry_vector[post_cut_histories] = z_entry_vector[start_position + i]; x_exit_vector[post_cut_histories] = x_exit_vector[start_position + i]; y_exit_vector[post_cut_histories] = y_exit_vector[start_position + i]; z_exit_vector[post_cut_histories] = z_exit_vector[start_position + i]; xy_entry_angle_vector[post_cut_histories] = xy_entry_angle_vector[start_position + i]; xz_entry_angle_vector[post_cut_histories] = xz_entry_angle_vector[start_position + i]; //xy_exit_angle_vector[post_cut_histories] = xy_exit_angle_vector[start_position + i]; //xz_exit_angle_vector[post_cut_histories] = xz_exit_angle_vector[start_position + i]; relative_ut_angle_vector[post_cut_histories] = relative_ut_angle_vector[start_position + i]; relative_uv_angle_vector[post_cut_histories] = relative_uv_angle_vector[start_position + i]; post_cut_histories++; } } //printf("end iteration %d\n", iteration ); } __global__ void statistical_cuts_kernel ( int num_histories, int* bin_counts, int* bin_num, float* sinogram, float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle, bool* passed_cuts, float* relative_ut_angle, float* relative_uv_angle ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( i < num_histories ) { /*float ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; if( ut_diff > PI ) { if( xy_entry_angle[i] > PI ) xy_entry_angle[i] -= TWO_PI; if( xy_exit_angle[i] > PI ) xy_exit_angle[i] -= TWO_PI; ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; } float uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; if( uv_diff > PI ) { if( xz_entry_angle[i] > PI ) xz_entry_angle[i] -= TWO_PI; if( xz_exit_angle[i] > PI ) xz_exit_angle[i] -= TWO_PI; uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; }*/ bool passed_ut_cut = ( fabs( relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) ); bool passed_uv_cut = ( fabs( relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) ); /*bool passed_ut_cut = ( fabs( ut_diff - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) ); bool passed_uv_cut = ( fabs( uv_diff - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) );*/ bool passed_WEPL_cut = ( fabs( mean_WEPL[bin_num[i]] - WEPL[i] ) <= ( SIGMAS_TO_KEEP * stddev_WEPL[bin_num[i]] ) ); passed_cuts[i] = passed_ut_cut && passed_uv_cut && passed_WEPL_cut; if( passed_cuts[i] ) { atomicAdd( &sinogram[bin_num[i]], WEPL[i] ); atomicAdd( &bin_counts[bin_num[i]], 1 ); } } } /************************************************************************************************************************************************************/ /*********************************************************************** MLP 
********************************************************************************/ /************************************************************************************************************************************************************/ void create_MLP_test_image() { double x, y; //Create space carve object, init to zeros MLP_test_image_h = (int*)calloc( MLP_IMAGE_VOXELS, sizeof(int)); for( int slice = 0; slice < MLP_IMAGE_SLICES; slice++ ) { for( int row = 0; row < MLP_IMAGE_ROWS; row++ ) { for( int column = 0; column < MLP_IMAGE_COLUMNS; column++ ) { x = ( column - MLP_IMAGE_COLUMNS/2 + 0.5) * MLP_IMAGE_VOXEL_WIDTH; y = ( MLP_IMAGE_ROWS/2 - row - 0.5 ) * MLP_IMAGE_VOXEL_HEIGHT; if( pow( x, 2 ) + pow( y, 2 ) <= pow( double(MLP_IMAGE_RECON_CYL_RADIUS), 2) ) MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 1; if( pow( x / MLP_PHANTOM_A, 2 ) + pow( y / MLP_PHANTOM_B, 2 ) <= 1 ) MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 8; } } } } void MLP_test() { char user_response[20]; float x_entry = -3.0; float y_entry = -sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_entry,2) ); float z_entry = 0.0; float x_exit = 2.5; float y_exit = sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_exit,2) ); float z_exit = 0.0; float xy_entry_angle = 25 * PI/180, xz_entry_angle = 0.0; float xy_exit_angle = 45* PI/180, xz_exit_angle = 0.0; float x_in_object, y_in_object, z_in_object; float u_in_object, t_in_object, v_in_object; float x_out_object, y_out_object, z_out_object; float u_out_object, t_out_object, v_out_object; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ float voxel_x, voxel_y, voxel_z; int voxel; int x_move_direction, y_move_direction, z_move_direction; int x_voxel_step, y_voxel_step, z_voxel_step; float x, y, z; float x_inside, y_inside, z_inside; float x_to_go, y_to_go, z_to_go; float delta_x, delta_y, delta_z; float x_extension, y_extension; float x_move, y_move, z_move; bool end_walk, outside_image; bool entered_object = false, exited_object = false; /********************************************************************************************************/ /******************** Determine if and Where the Proton Enters the Actual Object ************************/ /********************************************************************************************************/ /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( x_entry + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_entry ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_entry ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; //printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z); //printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * 
MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); //printf("voxel = %d \n", voxel ); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = MLP_IMAGE_VOXEL_WIDTH; delta_y = tanf( xy_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH; delta_z = tanf( xz_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH; if( x_entry == x_exit ) { delta_x = 0; delta_y = MLP_IMAGE_VOXEL_HEIGHT; delta_z = tanf(xz_entry_angle) / tanf(xy_entry_angle) * MLP_IMAGE_VOXEL_HEIGHT; if( y_entry == y_exit ) { delta_x = 0; delta_y = 0; delta_z = MLP_IMAGE_VOXEL_THICKNESS; } } //printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z ); x_move = 0, y_move = 0, z_move = 0; /*x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit ); y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit ); z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );*/ x_move_direction = ( cosf(xy_entry_angle) >= 0 ) - ( cosf(xy_entry_angle) < 0 ); y_move_direction = ( sinf(xy_entry_angle) >= 0 ) - ( sinf(xy_entry_angle) < 0 ); z_move_direction = ( sinf(xy_entry_angle) >= 0 ) - ( sinf(xy_entry_angle) < 0 ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_entry, y = y_entry, z = z_entry; x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside; //printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { entered_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } end_walk = entered_object || outside_image; ///********************************************************************************************/ ///*********************************** Voxel Walk Routine *************************************/ ///********************************************************************************************/ if( z_entry != z_exit ) { while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = MLP_IMAGE_VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = MLP_IMAGE_VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension 
\n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = delta_z / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { entered_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; end_walk = entered_object || outside_image; } } else { //printf("z_exit == z_entry\n"); while( !end_walk ) { //printf("beginning of loop\n\n"); //printf("x = %3f y = %3f z = %3f\n", x, y, z ); //printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); //printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z); // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //printf("y_extension = %3f\n", y_extension); //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); //printf("end of loop\n\n"); //printf("x_move = %3f y_move = %3f\n", x_move, y_move ); //printf("x = %3f y = %3f z = %3f\n", x, y, z ); //printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); //printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { entered_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } //printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f", MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 ); x += x_move_direction * x_move; y += y_move_direction * y_move; end_walk = entered_object || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_entry != z_exit => z_entry == z_exit if( entered_object ) { x_in_object = x; y_in_object = y; z_in_object = z; } /********************************************************************************************************/ /******************** Determine if and Where the Proton Exited the Actual Object ************************/ /********************************************************************************************************/ /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ 
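	/*
		Illustrative note (added; the numbers are hypothetical): modf splits the scaled coordinate into an integer
		voxel index and the fractional distance already traveled into that voxel. For example, if
		( x_exit + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH = 12.25, then modf returns 0.25 with voxel_x = 12,
		so x_inside = 0.25 * MLP_IMAGE_VOXEL_WIDTH, i.e. the distance from the left edge of voxel column 12.
	*/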
/********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( x_exit + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_exit ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_exit ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; //printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z); //printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); //printf("voxel = %d \n", voxel ); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = MLP_IMAGE_VOXEL_WIDTH; delta_y = tanf( xy_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH; delta_z = tanf( xz_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH; if( x_entry == x_exit ) { delta_x = 0; delta_y = MLP_IMAGE_VOXEL_HEIGHT; delta_z = tanf(xz_exit_angle) / tanf(xy_exit_angle) * MLP_IMAGE_VOXEL_HEIGHT; if( y_entry == y_exit ) { delta_x = 0; delta_y = 0; delta_z = MLP_IMAGE_VOXEL_THICKNESS; } } //printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z ); x_move = 0, y_move = 0, z_move = 0; //x_move_direction = ( x_exit <= x_entry ) - ( x_exit > x_entry ); //y_move_direction = ( y_exit <= y_entry ) - ( y_exit > y_entry ); //z_move_direction = ( z_exit <= z_entry ) - ( z_exit > z_entry ); x_move_direction = ( cosf(xy_exit_angle) < 0 ) - ( cosf(xy_exit_angle) >= 0 ); y_move_direction = ( sinf(xy_exit_angle) < 0 ) - ( sinf(xy_exit_angle) >= 0 ); z_move_direction = ( sinf(xy_exit_angle) < 0 ) - ( sinf(xy_exit_angle) >= 0 ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_exit, y = y_exit, z = z_exit; x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside; //printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { exited_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } end_walk = exited_object || outside_image; ///********************************************************************************************/ ///*********************************** Voxel Walk Routine *************************************/ ///********************************************************************************************/ if( z_entry != z_exit ) { //printf("z_entry != z_exit\n"); while( !end_walk ) { // 
Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = MLP_IMAGE_VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = MLP_IMAGE_VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = delta_z / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } voxel = int( voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS ); outside_image = ( voxel_x >= MLP_IMAGE_COLUMNS ) || ( voxel_y >= MLP_IMAGE_ROWS ) || ( voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { exited_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; end_walk = exited_object || outside_image; } } else { //printf("z_entry == z_exit\n"); while( !end_walk ) { //printf("beginning of loop\n\n"); //printf("x = %3f y = %3f z = %3f\n", x, y, z ); //printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); //printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z); // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //printf("y_extension = %3f\n", y_extension); //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); /*printf("end of loop\n\n"); printf("x_move = %3f y_move = %3f\n", x_move, y_move ); printf("x = %3f y = %3f z = %3f\n", x, y, z ); printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z);*/ outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { exited_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } //printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f",MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 ); x += x_move_direction * x_move; y += y_move_direction * y_move; end_walk = exited_object || outside_image; //fgets(user_response, 
sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_exit != z_exit => z_exit == z_exit if( exited_object ) { x_out_object = x; y_out_object = y; z_out_object = z; } x_inside = modf( ( x_in_object + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_in_object ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_in_object ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; //printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z); //printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); int path[1000]; int path_index = 0; double chord_lengths[1000]; MLP_test_image_h[voxel] = 0; path[path_index++] = voxel; u_in_object = ( cosf( xy_entry_angle ) * x_in_object ) + ( sinf( xy_entry_angle ) * y_in_object ); u_out_object = ( cosf( xy_entry_angle ) * x_out_object ) + ( sinf( xy_entry_angle ) * y_out_object ); t_in_object = ( cosf( xy_entry_angle ) * y_in_object ) - ( sinf( xy_entry_angle ) * x_in_object ); t_out_object = ( cosf( xy_entry_angle ) * y_out_object ) - ( sinf( xy_entry_angle ) * x_out_object ); v_in_object = z_in_object; v_out_object = z_out_object; double T_0[2] = { t_in_object, 0 }; double T_2[2] = { t_out_object, xy_exit_angle - xy_entry_angle }; double V_0[2] = { v_in_object, xz_entry_angle }; double V_2[2] = { v_out_object, xz_exit_angle }; double u_2 = abs(u_out_object - u_in_object); double u_0 = 0, u_1 = MLP_u_step; double t_1_previous, v_1_previous; double x_1_previous = x, y_1_previous = y, z_1_previous = z; int voxel_x_previous = voxel_x; int voxel_y_previous = voxel_y; int voxel_z_previous = voxel_z; int voxel_previous = voxel; int voxels_passed; double chord_segment; double chord_fraction; double x_to_edge, y_to_edge, z_to_edge; //fgets(user_response, sizeof(user_response), stdin); while( u_1 <= u_2 - MLP_u_step ) { double R_0[4] = { 1.0, u_1 - u_0, 0.0 , 1.0}; //a,b,c,d double R_0T[4] = { 1.0, 0.0, u_1 - u_0 , 1.0}; //a,c,b,d double R_1[4] = { 1.0, u_2 - u_1, 0.0 , 1.0}; //a,b,c,d double R_1T[4] = { 1.0, 0.0, u_2 - u_1 , 1.0}; //a,c,b,d double sigma_1_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_1 - u_0)/X_0) ), 2.0 ) / X_0; float sigma_t1 = (A_0/3)*pow(u_1, 3.0) + (A_1/12)*pow(u_1, 4.0) + (A_2/30)*pow(u_1, 5.0) + (A_3/60)*pow(u_1, 6.0) + (A_4/105)*pow(u_1, 7.0) + (A_5/168)*pow(u_1, 8.0); float sigma_t1_theta1 = pow(u_1, 2.0 )*( (A_0/2) + (A_1/6)*u_1 + (A_2/12)*pow(u_1, 2.0) + (A_3/20)*pow(u_1, 3.0) + (A_4/30)*pow(u_1, 4.0) + (A_5/42)*pow(u_1, 5.0) ); float sigma_theta1 = A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0); double determinant_Sigma_1 = sigma_t1 * sigma_theta1 - pow( sigma_t1_theta1, 2 );//ad-bc double Sigma_1I[4] = // Sigma_1 Inverse = [1/det(Sigma_1)]*{ d, -b, -c, a } { sigma_theta1 / determinant_Sigma_1, -sigma_t1_theta1 / determinant_Sigma_1, -sigma_t1_theta1 / determinant_Sigma_1, sigma_t1 / determinant_Sigma_1 }; double sigma_2_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_2 - u_1)/X_0 ) ), 2.0 ) / X_0; double sigma_t2 = (A_0/3)*pow(u_2, 3.0) + (A_1/12)*pow(u_2, 4.0) + (A_2/30)*pow(u_2, 5.0) + (A_3/60)*pow(u_2, 6.0) + (A_4/105)*pow(u_2, 7.0) + (A_5/168)*pow(u_2, 8.0) - (A_0/3)*pow(u_1, 3.0) - (A_1/4)*pow(u_1, 4.0) - (A_2/5)*pow(u_1, 5.0) - 
(A_3/6)*pow(u_1, 6.0) - (A_4/7)*pow(u_1, 7.0) - (A_5/8)*pow(u_1, 8.0) + 2*u_2*( (A_0/2)*pow(u_1, 2.0) + (A_1/3)*pow(u_1, 3.0) + (A_2/4)*pow(u_1, 4.0) + (A_3/5)*pow(u_1, 5.0) + (A_4/6)*pow(u_1, 6.0) + (A_5/7)*pow(u_1, 7.0) ) - pow(u_2, 2.0) * ( A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0) ); double sigma_t2_theta2 = pow(u_2, 2.0 )*( (A_0/2) + (A_1/6)*u_2 + (A_2/12)*pow(u_2, 2.0) + (A_3/20)*pow(u_2, 3.0) + (A_4/30)*pow(u_2, 4.0) + (A_5/42)*pow(u_2, 5.0) ) - u_2*u_1*( A_0 + (A_1/2)*u_1 + (A_2/3)*pow(u_1, 2.0) + (A_3/4)*pow(u_1, 3.0) + (A_4/5)*pow(u_1, 4.0) + (A_5/6)*pow(u_1, 5.0) ) + pow(u_1, 2.0 )*( (A_0/2) + (A_1/3)*u_1 + (A_2/4)*pow(u_1, 2.0) + (A_3/5)*pow(u_1, 3.0) + (A_4/6)*pow(u_1, 4.0) + (A_5/7)*pow(u_1, 5.0) ); double sigma_theta2 = A_0 * ( u_2 - u_1 ) + ( A_1 / 2 ) * ( pow(u_2, 2.0) - pow(u_1, 2.0) ) + ( A_2 / 3 ) * ( pow(u_2, 3.0) - pow(u_1, 3.0) ) + ( A_3 / 4 ) * ( pow(u_2, 4.0) - pow(u_1, 4.0) ) + ( A_4 / 5 ) * ( pow(u_2, 5.0) - pow(u_1, 5.0) ) + ( A_5 /6 )*( pow(u_2, 6.0) - pow(u_1, 6.0) ); double determinant_Sigma_2 = sigma_t2 * sigma_theta2 - pow( sigma_t2_theta2, 2 );//ad-bc double Sigma_2I[4] = // Sigma_2 Inverse = [1/det(Sigma_2)]*{ d, -b, -c, a } { sigma_theta2 / determinant_Sigma_2, -sigma_t2_theta2 / determinant_Sigma_2, -sigma_t2_theta2 / determinant_Sigma_2, sigma_t2 / determinant_Sigma_2 }; double first_term[4] = { Sigma_1I[0] + R_1T[0] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[1] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ), Sigma_1I[1] + R_1T[0] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[1] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] ), Sigma_1I[2] + R_1T[2] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[3] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ), Sigma_1I[3] + R_1T[2] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[3] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] ) }; double determinant_first_term = first_term[0] * first_term[3] - first_term[1] * first_term[2]; first_term[0] = first_term[3] / determinant_first_term; first_term[1] = -first_term[1] / determinant_first_term; first_term[2] = -first_term[2] / determinant_first_term; first_term[3] = first_term[0] / determinant_first_term; double second_term[2] = { Sigma_1I[0] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] ) + Sigma_1I[1] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] ) + R_1T[0] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] ) + R_1T[1] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] ) , Sigma_1I[2] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] ) + Sigma_1I[3] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] ) + R_1T[2] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] ) + R_1T[3] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] ) }; double t_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1]; double theta_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1]; // Do v MLP Now second_term[0] = Sigma_1I[0] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] ) + Sigma_1I[1] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] ) + R_1T[0] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] ) + R_1T[1] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] ); second_term[1] = Sigma_1I[2] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] ) + Sigma_1I[3] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] ) + R_1T[2] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] ) + R_1T[3] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] ); double v_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1]; double phi_1 = first_term[2] * 
second_term[0] + first_term[3] * second_term[1]; // Rotate Coordinate From utv to xyz Coordinate System and Determine Which Voxel this Point on the MLP Path is in double x_1 = ( cosf( xy_entry_angle ) * (u_in_object + u_1) ) - ( sinf( xy_entry_angle ) * t_1 ); double y_1 = ( sinf( xy_entry_angle ) * (u_in_object + u_1) ) + ( cosf( xy_entry_angle ) * t_1 ); double z_1 = v_in_object + v_1; x_inside = modf( ( x_1 + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_1 ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_1 ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; x_voxel_step = (voxel_x >= voxel_x_previous ) - (voxel_x <= voxel_x_previous ); y_voxel_step = (voxel_y >= voxel_y_previous ) - (voxel_y <= voxel_y_previous ); z_voxel_step = (voxel_z >= voxel_z_previous ) - (voxel_z <= voxel_z_previous ); x_to_edge = (x_voxel_step < 0) * x_inside + (x_voxel_step > 0) * (VOXEL_WIDTH - x_inside); y_to_edge = (y_voxel_step < 0) * y_inside + (y_voxel_step > 0) * (VOXEL_HEIGHT - y_inside); z_to_edge = (z_voxel_step < 0) * z_inside + (z_voxel_step > 0) * (VOXEL_THICKNESS - z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); if( voxel != path[path_index - 1] ) path[path_index++] = voxel; for( int i = 0; i < path_index; i++ ) printf( "path[i] = %d\n", path[i] ); printf( "path_index = %d\n\n", path_index ); fgets(user_response, sizeof(user_response), stdin); MLP_test_image_h[voxel] = 0; voxels_passed = (voxel_x - voxel_x_previous) + (voxel_y - voxel_y_previous) + (voxel_z - voxel_z_previous); chord_segment = sqrt( pow( x_1_previous - x_1, 2 ) + pow( y_1_previous - y_1, 2 ) + pow( z_1_previous - z_1, 2 ) ); if( voxels_passed == 0 ) { chord_lengths[path_index - 1] += chord_segment; } else if( voxels_passed == 1 ) { if( x_voxel_step != 0 ) { chord_fraction = x_to_edge / (x_1_previous - x_1); } else if( y_voxel_step != 0 ) { chord_fraction = y_to_edge / (y_1_previous - y_1); } else { chord_fraction = z_to_edge / (z_1_previous - z_1); } chord_lengths[path_index - 1] += chord_fraction * chord_segment; chord_lengths[path_index] += chord_segment - chord_lengths[path_index - 1]; } else if( voxels_passed == 2 ) { } else if( voxels_passed == 3 ) { } u_1 += MLP_u_step; t_1_previous = t_1; v_1_previous = v_1; x_1_previous = x_1; y_1_previous = y_1; z_1_previous = z_1; voxel_x_previous = voxel_x; voxel_y_previous = voxel_y; voxel_z_previous = voxel_z; voxel_previous = voxel; } } /************************************************************************************************************************************************************/ /************************************************************************ FBP *******************************************************************************/ /************************************************************************************************************************************************************/ void initialize_sinogram() { sinogram_h = (float*) calloc( NUM_BINS, sizeof(float) ); hipMalloc((void**) &sinogram_d, MEM_SIZE_BINS_FLOATS ); hipMemcpy( sinogram_d, sinogram_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice ); } void construct_sinogram() { dim3 dimBlock( T_BINS ); dim3 dimGrid( V_BINS, ANGULAR_BINS ); hipLaunchKernelGGL(( construct_sinogram_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, bin_counts_d, sinogram_d ); //hipMemcpy(sinogram_h, sinogram_d, 
MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost); //write_float_array_to_files("sinogram", output_directory, output_folder, sinogram_h, COLUMNS, ROWS, 3 ); //bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) ); //hipMemcpy(bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, hipMemcpyDeviceToHost) ; //write_integer_array_to_file( "bin_counts_post", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS ); } __global__ void construct_sinogram_kernel( int* bin_counts, float* sinogram ) { int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x; int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS; if( bin_counts[bin] > 0 ) sinogram[bin] /= bin_counts[bin]; } void filter() { puts("Doing the filtering..."); sinogram_filtered_h = (float*) calloc( NUM_BINS, sizeof(float) ); hipMalloc((void**) &sinogram_filtered_d, MEM_SIZE_BINS_FLOATS); hipMemcpy( sinogram_filtered_d, sinogram_filtered_h, MEM_SIZE_BINS_FLOATS, hipMemcpyHostToDevice); dim3 dimBlock( T_BINS ); dim3 dimGrid( V_BINS, ANGULAR_BINS ); hipLaunchKernelGGL(( filter_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, sinogram_d, sinogram_filtered_d ); hipMemcpy(sinogram_filtered_h, sinogram_filtered_d, MEM_SIZE_BINS_FLOATS, hipMemcpyDeviceToHost) ; free(sinogram_h); hipFree(sinogram_d); hipFree(sinogram_filtered_d); } __global__ void filter_kernel( float* sinogram, float* sinogram_filtered ) { int t_bin_ref,angle_bin,t_bin,v_bin,t_bin_sep; float filtered,t,v,scale_factor; v_bin = blockIdx.x; angle_bin = blockIdx.y; t_bin = threadIdx.x; v = ( v_bin - V_BINS/2 ) * V_BIN_SIZE + V_BIN_SIZE/2.0; // Loop over strips for this strip for( t_bin_ref = 0; t_bin_ref < T_BINS; t_bin_ref++ ) { t = ( t_bin_ref - T_BINS/2 ) * T_BIN_SIZE + T_BIN_SIZE/2.0; t_bin_sep = t_bin - t_bin_ref; // scale_factor = r . 
					// path = cos(theta_{r,path})
					scale_factor = SOURCE_RADIUS / sqrtf( SOURCE_RADIUS * SOURCE_RADIUS + t * t + v * v );
					switch( FILTER_NUM )
					{
						case 0: // Ram-Lak
							if( t_bin_sep == 0 )
								filtered = 1.0 / ( 8.0 * powf( T_BIN_SIZE, 2.0 ) );
							else if( t_bin_sep % 2 == 0 )
								filtered = 0;
							else
								filtered = -1.0 / ( 2.0 * powf( T_BIN_SIZE * PI * t_bin_sep, 2.0 ) );
							break;
						case 1: // Shepp-Logan filter
							filtered = powf( powf(T_BIN_SIZE * PI, 2.0) * ( 1.0 - powf(2 * t_bin_sep, 2.0) ), -1.0 );
							break;
					}
					int strip_index = ( v_bin * ANGULAR_BINS * T_BINS ) + ( angle_bin * T_BINS );
					sinogram_filtered[strip_index + t_bin] += T_BIN_SIZE * sinogram[strip_index + t_bin_ref] * filtered * scale_factor;
	}
}
void backprojection()
{
	puts("Doing the backprojection...");
	printf("DEBUG: MEM_SIZE_IMAGE_FLOAT = %u\n", MEM_SIZE_IMAGE_FLOAT);
	// Allocate host memory
	puts("DEBUG: Allocate host memory");
	char user_response[20];
	X_h = (float*) calloc( VOXELS, sizeof(float) );
	if( X_h == NULL )
	{
		printf("ERROR: Memory not allocated for X_h!\n");
		fgets(user_response, sizeof(user_response), stdin);
		exit(1);
	}
	// Check that we don't have any corruptions up until now
	for( int i = 0; i < NUM_BINS; i++ )
		if( sinogram_filtered_h[i] != sinogram_filtered_h[i] )
			printf("We have a nan in bin #%d\n", i);
	float delta = GANTRY_ANGLE_INTERVAL * ANGLE_TO_RADIANS;
	// Loop over the voxels
	for( int slice = 0; slice < SLICES; slice++ )
	{
		for( int column = 0; column < COLUMNS; column++ )
		{
			for( int row = 0; row < ROWS; row++ )
			{
				// Initial Distance Into Voxel
				/* x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
				y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i] ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
				z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i] ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
				voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
				voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
				voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT );
				voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS );*/
				// Get the spatial co-ordinates of the pixel
				/* float x, y, z;
				if( column > COLUMNS/2 ) x = -RECON_CYL_RADIUS + ( column - 0.5 )* VOXEL_WIDTH;
				else if( column < COLUMNS/2 ) x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH;
				else x = -RECON_CYL_RADIUS + column* VOXEL_WIDTH;
				if( column > ROWS/2 ) y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT;
				else if( column < ROWS/2 ) y = RECON_CYL_RADIUS - (row - 0.5) * VOXEL_HEIGHT;
				else y = RECON_CYL_RADIUS - row * VOXEL_HEIGHT;
				z = -RECON_CYL_HEIGHT / 2.0 + (slice + 0.5) * SLICE_THICKNESS;*/
				float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH;
				float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT;
				float z = -RECON_CYL_HEIGHT / 2.0 + (slice + 0.5) * SLICE_THICKNESS;
				//// If the voxel is outside a cylinder contained in the reconstruction volume, set to air
				if( ( x * x + y * y ) > ( RECON_CYL_RADIUS * RECON_CYL_RADIUS ) )
					X_h[( slice * COLUMNS * ROWS) + ( row * COLUMNS ) + column] = 0.00113;
				else
				{
					// Sum over projection angles
					for( int angle_bin = 0; angle_bin < ANGULAR_BINS; angle_bin++ )
					{
						// Rotate the pixel position to the beam-detector co-ordinate system
						float u = x * cosf( angle_bin * delta ) + y * sinf( angle_bin * delta );
						float t = -x * sinf( angle_bin * delta ) + y * cosf( angle_bin * delta );
						float v = z;
						// Project to find the detector number
						float detector_number_t = ( t - u *( t / ( SOURCE_RADIUS + u ) ) ) / T_BIN_SIZE + T_BINS/2.0;
						int t_bin = int( detector_number_t);
						if( t_bin > detector_number_t )
t_bin -= 1; float eta = detector_number_t - t_bin; // Now project v to get detector number in v axis float detector_number_v = ( v - u * ( v / ( SOURCE_RADIUS + u ) ) ) / V_BIN_SIZE + V_BINS/2.0; int v_bin = int( detector_number_v); if( v_bin > detector_number_v ) v_bin -= 1; float epsilon = detector_number_v - v_bin; // Calculate the fan beam scaling factor float scale_factor = powf( SOURCE_RADIUS / ( SOURCE_RADIUS + u ), 2 ); //bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS; // Compute the back-projection int bin = t_bin + angle_bin * T_BINS + v_bin * ANGULAR_BINS * T_BINS; int voxel = slice * COLUMNS * ROWS + row * COLUMNS + column; // not sure why this won't compile without calculating the index ahead of time instead inside []s int index = ANGULAR_BINS * T_BINS; //if( ( ( bin + ANGULAR_BINS * T_BINS + 1 ) >= NUM_BINS ) || ( bin < 0 ) ); if( v_bin == V_BINS - 1 || ( bin < 0 ) ) { X_h[voxel] += delta * 2 *( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]) * scale_factor; } //printf("The bin selected for this voxel does not exist!\n Slice: %d\n Column: %d\n Row: %d\n", slice, column, row); else { // not sure why this won't compile without calculating the index ahead of time instead inside []s /*X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1] + ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index] + eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor;*/ X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1] + ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index] + eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor; // Multilpying by the gantry angle interval for each gantry angle is equivalent to multiplying the final answer by 2*PI and is better numerically // so multiplying by delta each time should be replaced by X_h[voxel] *= 2 * PI after all contributions have been made, which is commented out below /*X_h[voxel] += scale_factor * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1] + ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index] + eta * epsilon * sinogram_filtered_h[bin + index + 1] );*/ if(X_h[voxel]!=X_h[voxel]) printf("We have a nan in slice %d, column %d, and row %d\n", slice, column, row); } //X_h[voxel] *= 2 * PI; } } } } } free(sinogram_filtered_h); FBP_object_h = (int*) calloc( COLUMNS * ROWS * SLICES, sizeof(int) ); for( int slice = 0; slice < SLICES; slice++ ) { for( int row = 0; row < ROWS; row++ ) { for( int column = 0; column < COLUMNS; column++ ) { float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH; float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT; float d_squared = powf(x, 2) + powf(y, 2); //if(X_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] > FBP_THRESHOLD && (d < powf(RECON_CYL_RADIUS-1.5, 2) ) ) if(X_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] > FBP_THRESHOLD && (d_squared < powf(RECON_CYL_RADIUS, 2) ) ) FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 1; else FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 0; } } } //// Set voxels outside reconstruction cylinder to zeros //for( int slice = 0; slice < SLICES; slice++ ) // for( int row = 0; row < ROWS; row++ ) // for( int column = 0; column < COLUMNS; column++ ) 
// {/* // float xv = ( column - COLUMNS/2 ) * VOXEL_WIDTH; // float yv = ( ROWS/2 - row ) * VOXEL_HEIGHT; // if( ( (xv * xv) + (yv * yv) ) >= float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) // FBP_object_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 0;*/ // float xv = int( column - COLUMNS/2 + 0.5); // float yv = int( ROWS/2 - row + 0.5); // if( ( (xv * xv) + (yv * yv) ) >= powf(COLUMNS/2, 2) ) // //if( ( (xv * xv) + (yv * yv) ) >= powf(COLUMNS/2- 3.0, 2) ) // FBP_object_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 0; // } //write_integer_array_to_files( "FBP_object", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES ); write_float_array_to_files( "X_h", output_directory, output_folder, X_h, COLUMNS, ROWS, SLICES ); write_integer_array_to_file( "x_FBP", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES ); } /************************************************************************************************************************************************************/ /****************************************************************** Image Initialization *******************************************************************/ /************************************************************************************************************************************************************/ void initialize_SC_hull( bool*& SC_hull_h, bool*& SC_hull_d ) { /* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */ /* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */ /* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */ /* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */ // Allocate memory for the hull image on the host and initialize to zeros SC_hull_h = (bool*)calloc( VOXELS, sizeof(bool)); float x, y; // Set the inner cylinder of the hull image to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) SC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = true; } // Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU hipMalloc((void**) &SC_hull_d, MEM_SIZE_IMAGE_BOOL); hipMemcpy(SC_hull_d, SC_hull_h, MEM_SIZE_IMAGE_BOOL, hipMemcpyHostToDevice) ; } void initialize_MSC_hull( int*& MSC_hull_h, int*& MSC_hull_d ) { /* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */ /* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */ /* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. 
Assign */ /* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */ // Allocate memory for the hull image on the host and initialize to zeros MSC_hull_h = (int*)calloc( VOXELS, sizeof(int)); float x, y; // Set the inner cylinder of the hull image to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) MSC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1; } // Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU hipMalloc((void**) &MSC_hull_d, MEM_SIZE_IMAGE_INT); hipMemcpy(MSC_hull_d, MSC_hull_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice) ; } void initialize_SM_hull( int*& SM_hull_h, int*& SM_hull_d ) { /* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */ /* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */ /* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */ /* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */ // Allocate memory for the hull image on the host and initialize to zeros SM_hull_h = (int*)calloc( VOXELS, sizeof(int)); float x, y; // Set the inner cylinder of the hull image to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) SM_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1; } // Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU hipMalloc((void**) &SM_hull_d, MEM_SIZE_IMAGE_INT); hipMemcpy(SM_hull_d, SM_hull_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice) ; } void initialize_float_image( float*& float_image_h, float*& float_image_d ) { //Create space carve object, init to zeros float_image_h = (float*)calloc( VOXELS, sizeof(float)); double x, y; // Set inner cylinder to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < double(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) float_image_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1; } hipMalloc((void**) &float_image_d, MEM_SIZE_IMAGE_FLOAT); hipMemcpy(float_image_d, float_image_h, MEM_SIZE_IMAGE_FLOAT, hipMemcpyHostToDevice) ; } /************************************************************************************************************************************************************/ /******************************************************************* Hull Detection *************************************************************************/ /************************************************************************************************************************************************************/ __device__ void voxel_walk( bool*& image, float x_entry, float y_entry, float z_entry, float x_exit, float y_exit, float z_exit ) { /********************************************************************************************/ 
/********************************* Voxel Walk Parameters ************************************/ /********************************************************************************************/ int x_move_direction, y_move_direction, z_move_direction; int x_voxel_step, y_voxel_step, z_voxel_step; float delta_x, delta_y, delta_z; float x_move, y_move, z_move; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ float x, y, z; float x_inside, y_inside, z_inside; float x_to_go, y_to_go, z_to_go; float x_extension, y_extension; float voxel_x, voxel_y, voxel_z; float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out; int voxel; bool outside_image, end_walk; /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( x_entry + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH; y_inside = modf( ( RECON_CYL_RADIUS - y_entry ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT; z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); voxel_x_out = int( ( x_exit + RECON_CYL_RADIUS ) /VOXEL_WIDTH ); voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit ) /VOXEL_HEIGHT ); voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit ) /VOXEL_THICKNESS ); voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = VOXEL_WIDTH; delta_y = abs( (y_exit - y_entry)/(x_exit - x_entry) * VOXEL_WIDTH ); delta_z = abs( (z_exit - z_entry)/(x_exit - x_entry) * VOXEL_WIDTH ); // Overwrite NaN if Divisors on delta_i Calculations Above if( x_entry == x_exit ) { delta_x = abs( (x_exit - x_entry)/(y_exit - y_entry) * VOXEL_HEIGHT ); delta_y = VOXEL_HEIGHT; delta_z = abs( (z_exit - z_entry)/(y_exit - y_entry) * VOXEL_HEIGHT ); if( y_entry == y_exit ) { delta_x = abs( (x_exit - x_entry)/(z_exit - z_entry) * VOXEL_THICKNESS ); delta_y = abs( (y_exit - y_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );; delta_z = VOXEL_THICKNESS; } } x_move = 0, y_move = 0, z_move = 0; x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit ); y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit ); z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_entry, y = y_entry, z = z_entry; x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( 
y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside; outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) image[voxel] = 0; end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); /********************************************************************************************/ /*********************************** Voxel Walk Routine *************************************/ /********************************************************************************************/ if( z_entry != z_exit ) { while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = delta_z / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; //fgets(user_response, sizeof(user_response), stdin); voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) image[voxel] = 0; end_walk = ( voxel == voxel_out ) || outside_image; } } else { //printf("z_exit == z_entry\n"); while( !end_walk ) { // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) image[voxel] = 0; end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_entry_h != z_exit_h => z_entry_h == z_exit_h } void SC( int num_histories ) { dim3 
dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); hipLaunchKernelGGL(( SC_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, num_histories, SC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d ); } __global__ void SC_kernel ( int num_histories, bool* SC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= SC_THRESHOLD) && (bin_num[i] >= 0) ) { voxel_walk( SC_image, x_entry[i], y_entry[i], z_entry[i], x_exit[i], y_exit[i], z_exit[i] ); }// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= PURE_SC_THRESH) && (bin_num[i] >= 0) ) } /************************************************************************************************************************************************************/ void MSC( int num_histories ) { dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); hipLaunchKernelGGL(( MSC_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, num_histories, MSC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d ); } __global__ void MSC_kernel ( int num_histories, int* MSC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] < MSC_THRESHOLD) && (bin_num[i] >= 0) ) { //char user_response[20]; /********************************************************************************************/ /********************************* Voxel Walk Parameters ************************************/ /********************************************************************************************/ int x_move_direction, y_move_direction, z_move_direction; int x_voxel_step, y_voxel_step, z_voxel_step; float delta_x, delta_y, delta_z; float x_move, y_move, z_move; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ float x, y, z; float x_inside, y_inside, z_inside; float x_to_go, y_to_go, z_to_go; float x_extension, y_extension; float voxel_x, voxel_y, voxel_z; float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out; int voxel; bool outside_image, end_walk; /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH; y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i]) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT; z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i]) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH ); voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT 
); voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS ); voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = VOXEL_WIDTH; delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); // Overwrite NaN if Divisors on delta_i Calculations Above if( x_entry[i] == x_exit[i] ) { delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); delta_y = VOXEL_HEIGHT; delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); if( y_entry[i] == y_exit[i] ) { delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS ); delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );; delta_z = VOXEL_THICKNESS; } } x_move = 0, y_move = 0, z_move = 0; x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] ); y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] ); z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_entry[i], y = y_entry[i], z = z_entry[i]; x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside; outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) atomicAdd( &MSC_image[voxel], 1 ); end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); /********************************************************************************************/ /*********************************** Voxel Walk Routine *************************************/ /********************************************************************************************/ if( z_entry[i] != z_exit[i] ) { while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = 
delta_z / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; //fgets(user_response, sizeof(user_response), stdin); voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) atomicAdd( &MSC_image[voxel], 1 ); end_walk = ( voxel == voxel_out ) || outside_image; } } else { //printf("z_exit[i] == z_entry[i]\n"); while( !end_walk ) { // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) atomicAdd( &MSC_image[voxel], 1 ); end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i] }// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= PURE_SC_THRESH) && (bin_num[i] >= 0) ) } void MSC_threshold() { hipMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost); write_integer_array_to_files("MSC_image", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES ); dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); hipLaunchKernelGGL(( MSC_threshold_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, MSC_image_d ); hipMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost); write_integer_array_to_files("MSC_image_thresholded", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES ); write_integer_array_to_file("x_MSC", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES ); hipFree( MSC_image_d ); free(MSC_image_h); } __global__ void MSC_threshold_kernel( int* MSC_image ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; int difference, max_difference = 0; if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) ) { for( int current_row = row - 1; current_row <= row + 1; current_row++ ) { for( int current_column = column - 1; current_column <= column + 1; current_column++ ) { difference = MSC_image[voxel] - MSC_image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS]; if( 
difference > max_difference ) max_difference = difference; } } } syncthreads(); if( max_difference > MSC_DIFF_THRESH ) MSC_image[voxel] = 0; else if( MSC_image[voxel] == 0 ) MSC_image[voxel] = 0; else MSC_image[voxel] = 1; if( x * x + y * y > RECON_CYL_RADIUS * RECON_CYL_RADIUS ) MSC_image[voxel] = 0; } /************************************************************************************************************************************************************/ void SM( int num_histories) { dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); hipLaunchKernelGGL(( SM_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, num_histories, SM_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d ); } __global__ void SM_kernel ( int num_histories, int* SM_image, int* bin_num, bool* traversed_recon_volume, float* WEPL, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; //if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SM_LOWER_THRESHOLD) && (bin_num[i] >= 0) ) //{ // //char user_response[20]; // /********************************************************************************************/ // /********************************* Voxel Walk Parameters ************************************/ // /********************************************************************************************/ // int x_move_direction, y_move_direction, z_move_direction; // int x_voxel_step, y_voxel_step, z_voxel_step; // float delta_x, delta_y, delta_z; // float x_move, y_move, z_move; // /********************************************************************************************/ // /**************************** Status Tracking Information ***********************************/ // /********************************************************************************************/ // float x, y, z; // float x_inside, y_inside, z_inside; // float x_to_go, y_to_go, z_to_go; // float x_extension, y_extension; // float voxel_x, voxel_y, voxel_z; // float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out; // int voxel; // bool outside_image, end_walk; // /********************************************************************************************/ // /************************** Initial and Boundary Conditions *********************************/ // /********************************************************************************************/ // // Initial Distance Into Voxel // x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH; // y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i] ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT; // z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i] ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS; // voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); // voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH ); // voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT ); // voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS ); // voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS); // /********************************************************************************************/ // /***************************** Path and Walk Information ************************************/ // /********************************************************************************************/ // // 
Lengths/Distances as x is Incremented One Voxel // delta_x = VOXEL_WIDTH; // delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); // delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); // // Overwrite NaN if Divisors on delta_i Calculations Above // if( x_entry[i] == x_exit[i] ) // { // delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); // delta_y = VOXEL_HEIGHT; // delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); // if( y_entry[i] == y_exit[i] ) // { // delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS ); // delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );; // delta_z = VOXEL_THICKNESS; // } // } // x_move = 0, y_move = 0, z_move = 0; // x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] ); // y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] ); // z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] ); // x_voxel_step = x_move_direction; // y_voxel_step = -y_move_direction; // z_voxel_step = -z_move_direction; // /********************************************************************************************/ // /**************************** Status Tracking Information ***********************************/ // /********************************************************************************************/ // x = x_entry[i], y = y_entry[i], z = z_entry[i]; // x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside; // y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside; // z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside; // // outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); // if( !outside_image ) // atomicAdd( &SM_image[voxel], 1 ); // end_walk = ( voxel == voxel_out ) || outside_image; // //fgets(user_response, sizeof(user_response), stdin); // /********************************************************************************************/ // /*********************************** Voxel Walk Routine *************************************/ // /********************************************************************************************/ // if( z_entry[i] != z_exit[i] ) // { // while( !end_walk ) // { // // Change in z for Move to Voxel Edge in x and y // x_extension = delta_z/delta_x * x_to_go; // y_extension = delta_z/delta_y * y_to_go; // if( z_to_go <= x_extension && z_to_go <= y_extension ) // { // //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); // x_move = delta_x / delta_z * z_to_go; // y_move = delta_y / delta_z * z_to_go; // z_move = z_to_go; // x_to_go -= x_move; // y_to_go -= y_move; // z_to_go = VOXEL_THICKNESS; // voxel_z += z_voxel_step; // if( x_to_go == 0 ) // { // voxel_x += x_voxel_step; // x_to_go = VOXEL_WIDTH; // } // if( y_to_go == 0 ) // { // voxel_y += y_voxel_step; // y_to_go = VOXEL_HEIGHT; // } // } // //If Next Voxel Edge is in x or xy Diagonal // else if( x_extension <= y_extension ) // { // //printf(" x_extension <= y_extension \n"); // x_move = x_to_go; // y_move = delta_y / delta_x * x_to_go; // z_move = delta_z / delta_x * x_to_go; // x_to_go = VOXEL_WIDTH; // y_to_go -= y_move; // z_to_go -= z_move; // voxel_x += x_voxel_step; // if( y_to_go == 0 ) // { // y_to_go = VOXEL_HEIGHT; // voxel_y += y_voxel_step; // } // } // // Else 
Next Voxel Edge is in y // else // { // //printf(" y_extension < x_extension \n"); // x_move = delta_x / delta_y * y_to_go; // y_move = y_to_go; // z_move = delta_z / delta_y * y_to_go; // x_to_go -= x_move; // y_to_go = VOXEL_HEIGHT; // z_to_go -= z_move; // voxel_y += y_voxel_step; // } // x += x_move_direction * x_move; // y += y_move_direction * y_move; // z += z_move_direction * z_move; // //fgets(user_response, sizeof(user_response), stdin); // voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); // outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); // if( !outside_image ) // atomicAdd( &SM_image[voxel], 1 ); // end_walk = ( voxel == voxel_out ) || outside_image; // } // } // else // { // //printf("z_exit[i] == z_entry[i]\n"); // while( !end_walk ) // { // // Change in x for Move to Voxel Edge in y // y_extension = delta_x/delta_y * y_to_go; // //If Next Voxel Edge is in x or xy Diagonal // if( x_to_go <= y_extension ) // { // //printf(" x_to_go <= y_extension \n"); // x_move = x_to_go; // y_move = delta_y / delta_x * x_to_go; // x_to_go = VOXEL_WIDTH; // y_to_go -= y_move; // voxel_x += x_voxel_step; // if( y_to_go == 0 ) // { // y_to_go = VOXEL_HEIGHT; // voxel_y += y_voxel_step; // } // } // // Else Next Voxel Edge is in y // else // { // //printf(" y_extension < x_extension \n"); // x_move = delta_x / delta_y * y_to_go; // y_move = y_to_go; // x_to_go -= x_move; // y_to_go = VOXEL_HEIGHT; // voxel_y += y_voxel_step; // } // x += x_move_direction * x_move; // y += y_move_direction * y_move; // voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); // outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); // if( !outside_image ) // atomicAdd( &SM_image[voxel], 1 ); // end_walk = ( voxel == voxel_out ) || outside_image; // //fgets(user_response, sizeof(user_response), stdin); // }// end: while( !end_walk ) // }//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i] //}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SPACE_MODEL_LOWER_THRESHOLD) && (WEPL[i] <= SPACE_MODEL_UPPER_THRESHOLD) && (bin_num[i] >= 0) ) } void MSC_differences() { int* MSC_differences_h = (int*) calloc( VOXELS, sizeof(int) ); int* MSC_differences_d; hipMalloc((void**) &MSC_differences_d, MEM_SIZE_IMAGE_INT ); hipMemcpy( MSC_differences_d, MSC_differences_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice ); dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); hipLaunchKernelGGL(( MSC_differences_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, MSC_image_d, MSC_differences_d ); } void SM_differences() { int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) ); int* SM_differences_d; hipMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT ); hipMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice ); hipLaunchKernelGGL(( SM_differences_kernel), dim3(( COLUMNS, ROWS )), dim3(SLICES) , 0, 0, SM_image_d, SM_differences_d ); hipLaunchKernelGGL(( SM_threshold_search_kernel), dim3(( COLUMNS, ROWS )), dim3(SLICES) , 0, 0, SM_image_d, SM_differences_d ); } __global__ void SM_differences_kernel( int* SM_image, int* SM_differences) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( voxel < VOXELS && row < COLUMNS - 1) { SM_differences[voxel] = abs(SM_image[voxel] - SM_image[voxel - ROWS]); if( column < COLUMNS - 1 ) { int difference_right = abs(SM_image[voxel] - SM_image[voxel + 
			1]);
			if( difference_right > SM_differences[voxel] )
				SM_differences[voxel] = -difference_right;
		}
	}
	/*syncthreads();
	int max_difference = 0;
	int index = 0;
	voxel = slice * COLUMNS * ROWS;
	for( ; voxel < voxel + COLUMNS * ROWS; voxel++ )
	{
		if( SM_differences[voxel] > max_difference )
		{
			max_difference = SM_differences[voxel];
			index = voxel;
		}
	}
	int threshold = 0;
	bool down = SM_differences[index] > 0;
	if( down )
		threshold = max(SM_image[index], SM_image[index + COLUMNS]);
	else
		threshold = max(SM_image[index], SM_image[index + 1]);
	syncthreads();
	voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
	if( SM_image[voxel] >= SM_THRESHOLD_MULTIPLIER * threshold )
		SM_image[voxel] = 0;
	else
		SM_image[voxel] = 1;*/
}
__global__ void SM_threshold_search_kernel( int* SM_image, int* SM_differences )
{
	int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
	int max_difference = 0;
	int index = 0;
	int voxel = slice * COLUMNS * ROWS;
	int slice_end = voxel + COLUMNS * ROWS;	// limit the scan to the voxels of this slice
	for( ; voxel < slice_end; voxel++ )
	{
		if( SM_differences[voxel] > max_difference )
		{
			max_difference = SM_differences[voxel];
			index = voxel;
		}
	}
	int threshold = 0;
	bool down = SM_differences[index] > 0;
	if( down )
		threshold = max(SM_image[index], SM_image[index + COLUMNS]);
	else
		threshold = max(SM_image[index], SM_image[index + 1]);
	syncthreads();
	voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
	if( SM_image[voxel] >= SM_THRESHOLD_MULTIPLIER * threshold )
		SM_image[voxel] = 0;
	else
		SM_image[voxel] = 1;
}
__global__ void MSC_differences_kernel( int* MSC_image, int* MSC_differences)
{
	int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x;
	int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS;
	if( voxel < VOXELS && row < COLUMNS - 1)
	{
		float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH;
		float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT;
		if( MSC_image[voxel] - MSC_image[voxel - ROWS] > MSC_DIFF_THRESH )
			MSC_image[voxel] = 0;
		else if( MSC_image[voxel - ROWS] - MSC_image[voxel] > MSC_DIFF_THRESH )
			MSC_image[voxel - ROWS] = 0;
		else if( column < COLUMNS - 1 )
		{
			if( MSC_image[voxel] - MSC_image[voxel + 1] > MSC_DIFF_THRESH )
				MSC_image[voxel] = 0;
			else if( MSC_image[voxel + 1] - MSC_image[voxel] > MSC_DIFF_THRESH )
				MSC_image[voxel + 1] = 0;
		}
		else
			MSC_image[voxel] = 1;
		if( x * x + y * y > RECON_CYL_RADIUS * RECON_CYL_RADIUS )
			MSC_image[voxel] = 0;
	}
}
void SM_threshold()
{
	// Copy the space modeled image from the GPU to the CPU and write it to file.
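	// Overview of the thresholding procedure below: after the raw space-model (SM) counts are written out, the
	// carve_differences kernel computes, for every voxel, the largest difference between its SM count and those of its
	// in-slice neighbors.  For each slice, the SM count at the voxel with the largest such difference is taken as that
	// slice's threshold (SM_thresholds_h), and SM_threshold_kernel then binarizes the slice on the GPU: voxels above
	// the slice threshold become 1, all other voxels and everything outside the reconstruction cylinder become 0.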
hipMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost); write_integer_array_to_files("SM_image", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES ); int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) ); int* SM_differences_d; hipMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT ); hipMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, hipMemcpyHostToDevice ); dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); hipLaunchKernelGGL(( carve_differences), dim3(dimGrid), dim3(dimBlock) , 0, 0, SM_differences_d, SM_image_d ); hipMemcpy( SM_differences_h, SM_differences_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost ); int* SM_thresholds_h = (int*) calloc( SLICES, sizeof(int) ); int voxel; int max_difference = 0; for( int slice = 0; slice < SLICES; slice++ ) { for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ ) { voxel = pixel + slice * COLUMNS * ROWS; if( SM_differences_h[voxel] > max_difference ) { max_difference = SM_differences_h[voxel]; SM_thresholds_h[slice] = SM_image_h[voxel]; } } printf( "Slice %d : The maximum space_model difference = %d and the space_model threshold = %d\n", slice, max_difference, SM_thresholds_h[slice] ); max_difference = 0; } int* SM_thresholds_d; unsigned int threshold_size = SLICES * sizeof(int); hipMalloc((void**) &SM_thresholds_d, threshold_size ); hipMemcpy( SM_thresholds_d, SM_thresholds_h, threshold_size, hipMemcpyHostToDevice ); hipLaunchKernelGGL(( SM_threshold_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, SM_image_d, SM_thresholds_d); hipMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, hipMemcpyDeviceToHost); //write_integer_array_to_files("space_model_thresholded", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES ); write_integer_array_to_file("x_SM", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES ); hipFree( SM_differences_d ); hipFree( SM_thresholds_d ); hipFree( SM_image_d ); free(SM_differences_h); free(SM_thresholds_h); free(SM_image_h); } __global__ void SM_threshold_kernel( int* SM_image, int* SM_threshold ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( voxel < VOXELS ) { if( SM_image[voxel] > 1.0 * SM_threshold[slice] ) SM_image[voxel] = 1; else SM_image[voxel] = 0; if( x * x + y * y > RECON_CYL_RADIUS * RECON_CYL_RADIUS ) SM_image[voxel] = 0; } } /************************************************************************************************************************************************************/ __global__ void carve_differences( int* carve_differences, int* image ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) ) { int difference, max_difference = 0; for( int current_row = row - 1; current_row <= row + 1; current_row++ ) { for( int current_column = column - 1; current_column <= column + 1; current_column++ ) { difference = image[voxel] - image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS]; if( difference > max_difference ) max_difference = difference; } } carve_differences[voxel] = max_difference; } } void averaging_filter( bool*& image_h, bool*& image_d, int filter_size ) { initialize_SC_hull(image_h, image_d); float threshold = 0; dim3 dimBlock( SLICES ); dim3 dimGrid( 
	COLUMNS, ROWS );
	hipLaunchKernelGGL(( averaging_filter_kernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, image_d, filter_size, threshold);
	// image_h and image_d are bool images allocated with MEM_SIZE_IMAGE_BOOL bytes, so copy that many bytes back
	hipMemcpy(image_h, image_d, MEM_SIZE_IMAGE_BOOL, hipMemcpyDeviceToHost) ;
	write_bool_array_to_file( "test", output_directory, output_folder, image_h, COLUMNS, ROWS, SLICES );
}
__global__ void averaging_filter_kernel( bool* image, int filter_size, float threshold )
{
	int voxel_x = blockIdx.x;
	int voxel_y = blockIdx.y;
	int voxel_z = threadIdx.x;
	int voxel = voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS;
	int sum = image[voxel];
	if( (voxel_x > 0) && (voxel_y > 0) && (voxel_x < COLUMNS - 1) && (voxel_y < ROWS - 1) )
	{
		for( int i = voxel_x - filter_size/2; i <= voxel_x + filter_size/2; i++ )
			for( int j = voxel_y - filter_size/2; j <= voxel_y + filter_size/2; j++ )
				sum += image[i + j * COLUMNS + voxel_z * COLUMNS * ROWS];
	}
	//value[voxel] = sum > threshold;
	syncthreads();
	image[voxel] = sum > threshold;
}
/************************************************************************************************************************************************************/
/******************************************************** Memory Transfers, Maintenance, and Cleaning *******************************************************/
/************************************************************************************************************************************************************/
void initial_processing_memory_clean()
{
	free( gantry_angle_h );
	hipFree( x_entry_d );
	hipFree( y_entry_d );
	hipFree( z_entry_d );
	hipFree( x_exit_d );
	hipFree( y_exit_d );
	hipFree( z_exit_d );
	hipFree( traversed_recon_volume_d );
	hipFree( bin_num_d );
	hipFree( WEPL_d);
}
void post_cut_memory_clean()
{
	free(passed_cuts_h );
	free(stddev_rel_ut_angle_h);
	free(stddev_rel_uv_angle_h);
	free(stddev_WEPL_h);
	hipFree( passed_cuts_d );
	hipFree( bin_num_d );
	hipFree( WEPL_d );
	hipFree( xy_entry_angle_d );
	hipFree( xz_entry_angle_d );
	//hipFree( xy_exit_angle_d );
	//hipFree( xz_exit_angle_d );
	hipFree( relative_ut_angle_d );
	hipFree( relative_uv_angle_d );
	hipFree( mean_rel_ut_angle_d );
	hipFree( mean_rel_uv_angle_d );
	hipFree( mean_WEPL_d );
	hipFree( stddev_rel_ut_angle_d );
	hipFree( stddev_rel_uv_angle_d );
	hipFree( stddev_WEPL_d );
}
void resize_vectors( int new_size )
{
	bin_num_vector.resize( new_size );
	//gantry_angle_vector.resize( new_size );
	WEPL_vector.resize( new_size );
	x_entry_vector.resize( new_size );
	y_entry_vector.resize( new_size );
	z_entry_vector.resize( new_size );
	x_exit_vector.resize( new_size );
	y_exit_vector.resize( new_size );
	z_exit_vector.resize( new_size );
	xy_entry_angle_vector.resize( new_size );
	xz_entry_angle_vector.resize( new_size );
	//xy_exit_angle_vector.resize( new_size );
	//xz_exit_angle_vector.resize( new_size );
	relative_ut_angle_vector.resize( new_size );
	relative_uv_angle_vector.resize( new_size );
}
void shrink_vectors( int new_capacity )
{
	bin_num_vector.shrink_to_fit();
	//gantry_angle_vector.shrink_to_fit();
	WEPL_vector.shrink_to_fit();
	x_entry_vector.shrink_to_fit();
	y_entry_vector.shrink_to_fit();
	z_entry_vector.shrink_to_fit();
	x_exit_vector.shrink_to_fit();
	y_exit_vector.shrink_to_fit();
	z_exit_vector.shrink_to_fit();
	xy_entry_angle_vector.shrink_to_fit();
	xz_entry_angle_vector.shrink_to_fit();
	//xy_exit_angle_vector.shrink_to_fit();
	//xz_exit_angle_vector.shrink_to_fit();
	relative_ut_angle_vector.shrink_to_fit();
	relative_uv_angle_vector.shrink_to_fit();
}
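// NOTE: The initialization routines in this file repeatedly follow the same pattern: allocate a host buffer, hipMalloc a
// device buffer of the same size, and hipMemcpy the host data over, without checking the HIP return codes.  The helper
// below is a minimal, illustrative sketch of how that pattern could be factored out with basic error checking.  It is
// not called anywhere in this program; the function name and the use of printf/exit are choices made for this example
// only, and it relies on the HIP runtime API already used throughout this file.
template<typename T>
T* allocate_and_copy_to_device_example( const T* host_data, size_t num_elements )
{
	T* device_data = NULL;
	size_t num_bytes = num_elements * sizeof(T);
	// Allocate the device buffer and abort with a readable message if the allocation fails
	hipError_t status = hipMalloc( (void**) &device_data, num_bytes );
	if( status != hipSuccess )
	{
		printf( "hipMalloc failed: %s\n", hipGetErrorString( status ) );
		exit(1);
	}
	// Copy the host data to the device, again checking the return code
	status = hipMemcpy( device_data, host_data, num_bytes, hipMemcpyHostToDevice );
	if( status != hipSuccess )
	{
		printf( "hipMemcpy failed: %s\n", hipGetErrorString( status ) );
		exit(1);
	}
	return device_data;
}
// Example usage (hypothetical): sinogram_d = allocate_and_copy_to_device_example( sinogram_h, NUM_BINS );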
/************************************************************************************************************************************************************/ /****************************************************** Routines for Writing Data Arrays/Vectors to Disk ****************************************************/ /************************************************************************************************************************************************************/ void write_bool_array_to_files( char* output_filename_base, const char* output_directory, const char* output_folder, bool* bool_array, int x_max, int y_max, int z_max ) { char output_filename[256]; // Write each slice of the array/image to a separate file for(int z = 0; z < z_max; z++) { ofstream output_file; sprintf( output_filename, "%s%s/%s_%d.txt", output_directory, output_folder, output_filename_base, z ); output_file.open(output_filename); for(int y = 0; y < y_max; y++) { for(int x = 0; x < x_max; x++) output_file << bool_array[(z*x_max*y_max)+(y*x_max)+x] << " "; output_file << endl; } output_file.close(); } } void write_bool_array_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, bool* bool_array, int x_max, int y_max, int z_max ) { // Write each slice of the array/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << bool_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } void write_integer_array_to_files( char* output_filename_base, const char* output_directory, const char* output_folder, int* integer_array, int x_max, int y_max, int z_max ) { char output_filename[256]; // Write each slice of the array/image to a separate file for(int z = 0; z < z_max; z++) { ofstream output_file; sprintf( output_filename, "%s%s/%s_%d.txt", output_directory, output_folder, output_filename_base, z ); output_file.open(output_filename); for(int y = 0; y < y_max; y++) { for(int x = 0; x < x_max; x++) output_file << integer_array[(z*x_max*y_max)+(y*x_max)+x] << " "; output_file << endl; } output_file.close(); } } void write_integer_array_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, int* integer_array, int x_max, int y_max, int z_max ) { // Write each slice of the array/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << integer_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } void write_float_array_to_files( char* output_filename_base, const char* output_directory, const char* output_folder, float* &float_array, int x_max, int y_max, int z_max ) { char output_filename[256]; // Write each slice of the array/image to a separate file for(int z = 0; z < z_max; z++) { ofstream output_file; sprintf( output_filename, "%s%s/%s_%d.txt", output_directory, output_folder, output_filename_base, z ); output_file.open(output_filename); for(int y = 0; y < y_max; y++) { for(int x = 0; x < x_max; x++) 
output_file << float_array[ ( z * x_max * y_max ) + ( y * x_max ) + x ] << " "; output_file << endl; } output_file.close(); } } void write_float_array_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, float* float_array, int x_max, int y_max, int z_max ) { // Write each slice of the array/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << float_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } void write_float_vector_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, vector<float> vector_array, int x_max, int y_max, int z_max ) { // Write each slice of the vector/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << vector_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } /************************************************************************************************************************************************************/ /********************************************************************* Helper Functions *********************************************************************/ /************************************************************************************************************************************************************/ bool bad_data_angle( int angle ) { static const int bad_angles_array[] = {80, 84, 88, 92, 96, 100, 00, 180, 260, 264, 268, 272, 276}; vector<int> bad_angles(bad_angles_array, bad_angles_array + sizeof(bad_angles_array) / sizeof(bad_angles_array[0]) ); bool bad_angle = false; for( int i = 0; i < bad_angles.size(); i++ ) if( angle == bad_angles[i] ) bad_angle = true; return bad_angle; } int calculate_x_voxel(float x_position, int x_voxels, float voxel_width ) { // -10 100 1 [-50 49] -40 float x_width = x_voxels * voxel_width;//100 float x_range = x_width/2;//50 return ( x_position + x_range) / voxel_width;//-10+50/1 = 40 //[0 99] } int calculate_y_voxel(float y_position, int y_voxels, float voxel_height ) { // 10 100 1 [-50 49] 40 float y_width = y_voxels * voxel_height;//100 float y_range = y_width/2;//50 return ( y_range - y_position ) / voxel_height; } int calculate_slice(float z_position, int z_voxels, float voxel_thickness ) { // -10 100 1 [-50 49] -40 float z_width = z_voxels * voxel_thickness;//100 float z_range = z_width/2;//50 return ( z_range - z_position ) / voxel_thickness; } /************************************************************************************************************************************************************/ /****************************************************************** Testing Functions ***********************************************************************/ /************************************************************************************************************************************************************/ void test_func() { char user_response[20]; 
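	// test_func() exercises the post-cut vector compaction logic in isolation: it fills the history vectors with 30
	// dummy entries (value = index), marks every other entry as having passed the cuts, prints the initial contents,
	// then walks the data in blocks of 5, copying the surviving entries toward the front of each vector, resizes the
	// vectors to the number of survivors, and prints the compacted contents so the behavior can be checked by hand.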
/************************************************************************************************************************************************************/
/****************************************************************** Testing Functions ***********************************************************************/
/************************************************************************************************************************************************************/
void test_func()
{
	char user_response[20];
	//fgets(user_response, sizeof(user_response), stdin);
	bool* passed_cuts_h = (bool*)calloc (30, sizeof(bool));
	for( int i = 0; i < 30; i++ )
	{
		bin_num_vector.push_back(i);
		WEPL_vector.push_back(i);
		x_entry_vector.push_back(i);
		y_entry_vector.push_back(i);
		z_entry_vector.push_back(i);
		x_exit_vector.push_back(i);
		y_exit_vector.push_back(i);
		z_exit_vector.push_back(i);
		xy_entry_angle_vector.push_back(i);
		xz_entry_angle_vector.push_back(i);
		xy_exit_angle_vector.push_back(i);
		xz_exit_angle_vector.push_back(i);
		passed_cuts_h[i] = i%2;
	}
	for( int i = 0; i < 30; i++ )
	{
		printf("bin_num_vector[%d] = %d\n", i, bin_num_vector[i]);
		printf("WEPL_vector[%d] = %3f\n", i, WEPL_vector[i]);
		printf("x_entry_vector[%d] = %3f\n", i, x_entry_vector[i]);
		printf("y_entry_vector[%d] = %3f\n", i, y_entry_vector[i]);
		printf("z_entry_vector[%d] = %3f\n", i, z_entry_vector[i]);
		printf("x_exit_vector[%d] = %3f\n", i, x_exit_vector[i]);
		printf("y_exit_vector[%d] = %3f\n", i, y_exit_vector[i]);
		printf("z_exit_vector[%d] = %3f\n", i, z_exit_vector[i]);
		printf("xy_entry_angle_vector[%d] = %3f\n", i, xy_entry_angle_vector[i]);
		printf("xz_entry_angle_vector[%d] = %3f\n", i, xz_entry_angle_vector[i]);
		printf("xy_exit_angle_vector[%d] = %3f\n", i, xy_exit_angle_vector[i]);
		printf("xz_exit_angle_vector[%d] = %3f\n", i, xz_exit_angle_vector[i]);
		printf("passed_cuts_h[%d] = %d\n", i, passed_cuts_h[i]);
		fgets(user_response, sizeof(user_response), stdin);
	}
	int start_position = 0;
	int post_cut_histories = 0;
	for( int iteration = 0; iteration < 6; iteration++ )
	{
		printf("start iteration %d\n", iteration );
		for( int i = 0; i < 5; i++ )
		{
			if( passed_cuts_h[start_position + i] )
			{
				printf("start i = %d\n", i );
				printf("index = %d\n", start_position + i );
				bin_num_vector[post_cut_histories] = bin_num_vector[start_position + i];
				WEPL_vector[post_cut_histories] = WEPL_vector[start_position + i];
				x_entry_vector[post_cut_histories] = x_entry_vector[start_position + i];
				y_entry_vector[post_cut_histories] = y_entry_vector[start_position + i];
				z_entry_vector[post_cut_histories] = z_entry_vector[start_position + i];
				x_exit_vector[post_cut_histories] = x_exit_vector[start_position + i];
				y_exit_vector[post_cut_histories] = y_exit_vector[start_position + i];
				z_exit_vector[post_cut_histories] = z_exit_vector[start_position + i];
				xy_entry_angle_vector[post_cut_histories] = xy_entry_angle_vector[start_position + i];
				xz_entry_angle_vector[post_cut_histories] = xz_entry_angle_vector[start_position + i];
				xy_exit_angle_vector[post_cut_histories] = xy_exit_angle_vector[start_position + i];
				xz_exit_angle_vector[post_cut_histories] = xz_exit_angle_vector[start_position + i];
				post_cut_histories++;
				printf("end i = %d\n", i );
			}
		}
		start_position += 5;
		printf("end iteration %d\n", iteration );
	}
	bin_num_vector.resize(post_cut_histories);
	WEPL_vector.resize(post_cut_histories);
	x_entry_vector.resize(post_cut_histories);
	y_entry_vector.resize(post_cut_histories);
	z_entry_vector.resize(post_cut_histories);
	x_exit_vector.resize(post_cut_histories);
	y_exit_vector.resize(post_cut_histories);
	z_exit_vector.resize(post_cut_histories);
	xy_entry_angle_vector.resize(post_cut_histories);
	xz_entry_angle_vector.resize(post_cut_histories);
	xy_exit_angle_vector.resize(post_cut_histories);
	xz_exit_angle_vector.resize(post_cut_histories);
	printf("post_cuts\n\n\n");
	printf("post_cut_histories = %d\n\n", post_cut_histories);
	for( int i = 0; i < post_cut_histories; i++ )
	{
		printf("bin_num_vector[%d] = %d\n", i, bin_num_vector[i]);
		printf("WEPL_vector[%d] = %3f\n", i, WEPL_vector[i]);
printf("x_entry_vector[%d] = %3f\n", i, x_entry_vector[i]); printf("y_entry_vector[%d] = %3f\n", i, y_entry_vector[i]); printf("z_entry_vector[%d] = %3f\n", i, z_entry_vector[i]); printf("x_exit_vector[%d] = %3f\n", i, x_exit_vector[i]); printf("y_exit_vector[%d] = %3f\n", i, y_exit_vector[i]); printf("z_exit_vector[%d] = %3f\n", i, z_exit_vector[i]); printf("xy_entry_angle_vector[%d] = %3f\n", i, xy_entry_angle_vector[i]); printf("xz_entry_angle_vector[%d] = %3f\n", i, xz_entry_angle_vector[i]); printf("xy_exit_angle_vector[%d] = %3f\n", i, xy_exit_angle_vector[i]); printf("xz_exit_angle_vector[%d] = %3f\n", i, xz_exit_angle_vector[i]); printf("passed_cuts_h[%d] = %d\n", i, passed_cuts_h[i]); fgets(user_response, sizeof(user_response), stdin); } } __global__ void test_func_kernel( int* test_array, int vec_array_elements ) { for(int i = 0; i < vec_array_elements; i++ ) test_array[i] *= 2; }
f1e1710020a59d98564300f3bad5cd9018ff83d4.cu
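// NOTE (illustrative only, not part of the original source): the PCTD file header parsed field-by-field
// in count_histories_v0() and iterative_data_read_v0()/v1() below has the fixed-size layout sketched in
// this hypothetical struct. The three variable-length strings that follow it (phantom name, data source,
// prepared by) are each preceded by an int character count, so they cannot live in a flat struct and are
// read separately, exactly as those routines do.
#if 0
struct PCTD_fixed_header
{
	char  magic_number[4];		// "PCTD" (not NULL-terminated in the file)
	int   version_id;			// format version (0 for the layout handled below)
	int   num_histories;		// number of proton events in the file
	float projection_angle;	// degrees
	float beam_energy;			// MeV
	int   generation_date;		// Unix time
	int   preprocess_date;		// Unix time
	// followed by: int length + characters, three times (phantom name, data source, prepared by),
	// then N-element float arrays of t0..t3, v0..v3, u0..u3 and WEPL (all in mm)
};
#endif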
//********************************************************************************************************************************************************// //*********************************************** Proton CT Preprocessing and Image Reconstruction Code *************************************************// //********************************************************************************************************************************************************// #include "pCT_Reconstruction.h" //********************************************************************************************************************************************************// //********************************************************************** Host Code ***********************************************************************// //********************************************************************************************************************************************************// // Preprocessing setup and initializations void assign_SSD_positions(); void initializations(); void count_histories_old(); void count_histories_v0(); void count_histories_v1(); void reserve_vector_capacity(); // Preprocessing routines void iterative_data_read_old( int, int, int ); void iterative_data_read_v0( int, int, int ); void iterative_data_read_v1( int, int, int ); void recon_volume_intersections( int ); void bin_valid_histories( int ); void calculate_means(); void sum_differences( int, int ); void calculate_std_devs(); void statistical_cuts( int, int ); void initialize_sinogram(); void construct_sinogram(); void filter(); void backprojection(); // Hull-Detection void initialize_SC_hull( bool*&, bool*& ); void initialize_MSC_hull( int*&, int*& ); void initialize_SM_hull( int*&, int*& ); void initialize_float_image( float*&, float*& ); void SC( int ); void MSC( int ); void SM( int ); void MSC_differences(); void SM_differences(); void MSC_threshold(); void SM_threshold(); void averaging_filter( bool*&, bool*&, int); // MLP void create_MLP_test_image(); // In development void MLP_test(); // In development // Write arrays/vectors to file void write_bool_array_to_files( char*, const char*, const char*, bool*, int, int, int ); void write_bool_array_to_file( char*, const char*, const char* , bool*, int, int, int ); void write_integer_array_to_files( char*, const char*, const char* , int*, int, int, int ); void write_integer_array_to_file( char*, const char*, const char* , int*, int, int, int ); void write_float_array_to_files( char*, const char*, const char* , float*&, int, int, int ); void write_float_array_to_file( char*, const char*, const char* , float*, int, int, int ); void write_float_vector_to_file( char*, const char*, const char* , vector<float>, int, int, int ); // Memory transfers and allocations/deallocations void post_cut_memory_clean(); void resize_vectors( int ); void shrink_vectors( int ); void initial_processing_memory_clean(); // Helper Functions bool bad_data_angle( int ); int calculate_x_voxel(float, int, float); int calculate_y_voxel(float, int, float); int calculate_slice(float, int, float); // New routine test functions void test_func(); //********************************************************************************************************************************************************// //****************************************************************** Device (GPU) Code *******************************************************************// 
//********************************************************************************************************************************************************// // Preprocessing routines __global__ void recon_volume_intersections_kernel( int, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*); __global__ void bin_valid_histories_kernel( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* ); __global__ void calculate_means_kernel( int*, float*, float*, float* ); __global__ void sum_differences_kernel( int, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float* ); __global__ void calculate_std_devs_kernel( int*, float*, float*, float* ); __global__ void statistical_cuts_kernel( int, int*, int*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, float*, bool*, float*, float* ); __global__ void construct_sinogram_kernel( int*, float* ); __global__ void filter_kernel( float*, float* ); // Hull-Detection __device__ void voxel_walk( bool*&, float, float, float, float, float, float ); __global__ void SC_kernel( int, bool*, int*, bool*, float*, float*, float*, float*, float*, float*, float* ); __global__ void MSC_kernel( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* ); __global__ void SM_kernel( int, int*, int*, bool*, float*, float*, float*, float*, float*, float*, float* ); __global__ void MSC_differences_kernel( int*, int* ); __global__ void SM_differences_kernel( int*, int* ); __global__ void SM_threshold_search_kernel( int*, int* ); __global__ void MSC_threshold_kernel( int* ); __global__ void SM_threshold_kernel( int*, int* ); __global__ void carve_differences( int*, int* ); __global__ void averaging_filter_kernel( bool*, int, float ); // New routine test functions __global__ void test_func_kernel( int*, int); /************************************************************************************************************************************************************/ /******************************************************************** Program Main **************************************************************************/ /************************************************************************************************************************************************************/ int main(int argc, char** argv) { char user_response[20]; /* puts("Hit enter to stop..."); fgets(user_response, sizeof(user_response), stdin); exit(1); */ /********************************************************************************************/ /* Start the Execution Timing Clock */ /********************************************************************************************/ clock_t start,end; start = clock(); /********************************************************************************************/ /* Initialize Hull Detection Images and Transfer Them to the GPU */ /********************************************************************************************/ if( SC_ON ) initialize_SC_hull( SC_image_h, SC_image_d ); if( MSC_ON ) initialize_MSC_hull( MSC_image_h, MSC_image_d ); if( SM_ON ) initialize_SM_hull( SM_image_h, SM_image_d ); /********************************************************************************************/ /* Read 
the u-Coordinates of the Detector Planes from the Config File, Allocate and */ /* Initialize Statistical Data Arrays, and Count the Number of Histories Per File, */ /* Projection, Gantry Angle, Scan, and Total. Request Input from User to Continue. */ /********************************************************************************************/ puts("Reading tracker plane positions and initializing storage arrays..."); if( CONFIG_FILE) assign_SSD_positions(); // Read the detector plane u-coordinates from config file initializations(); // allocate and initialize host and GPU memory for binning if( VERSION_OLD ) count_histories_old(); // count the number of histories per file, per scan, total, etc. else if( VERSION_0 ) count_histories_v0(); // count the number of histories per file, per scan, total, etc. else count_histories_v1(); /********************************************************************************************/ /* Iteratively Read and Process Data One Chunk at a Time. There are at Most */ /* MAX_GPU_HISTORIES Per Chunk (i.e. Iteration). On Each Iteration: */ /* (1) Read Data from File */ /* (2) Determine Which Histories Traverse the Reconstruction Volume and Store this */ /* Information in a Boolean Array */ /* (3) Determine Which Bin Each History Belongs to */ /* (4) Use the Boolean Array to Determine Which Histories to Keep and then Push */ /* the Intermediate Data from these Histories onto the Permanent Storage Vectors */ /* (5) Free Up Temporary Host/GPU Array Memory Allocated During Iteration */ /********************************************************************************************/ puts("Iteratively Reading Data from Hard Disk"); puts("Removing Proton Histories that Don't Pass Through the Reconstruction Volume"); puts("Binning the Data from Those that Did..."); int start_file_num = 0, end_file_num = 0, histories_to_process = 0; while( start_file_num != NUM_FILES ) { while( end_file_num < NUM_FILES ) { if( histories_to_process + histories_per_file[end_file_num] < MAX_GPU_HISTORIES ) histories_to_process += histories_per_file[end_file_num]; else break; end_file_num++; } if( VERSION_OLD ) iterative_data_read_old( histories_to_process, start_file_num, end_file_num - 1 ); else if( VERSION_0 ) iterative_data_read_v0( histories_to_process, start_file_num, end_file_num - 1 ); else iterative_data_read_v1( histories_to_process, start_file_num, end_file_num - 1 ); recon_volume_intersections( histories_to_process ); bin_valid_histories( histories_to_process ); if( SC_ON && (!bad_data_angle( gantry_angle_h[0] ) || !RESTRICTED_ANGLES ) ) SC( histories_to_process ); if( MSC_ON ) MSC( histories_to_process ); if( SM_ON ) SM( histories_to_process ); initial_processing_memory_clean(); start_file_num = end_file_num; histories_to_process = 0; } exit(1); /********************************************************************************************/ /* Shrink vectors so capacity reduced to size, which is number of histories remaining after */ /* histories that didn't intersect reconstruction volume were ignored */ /********************************************************************************************/ shrink_vectors( recon_vol_histories ); /********************************************************************************************/ /* Perform Thresholding on MSC and SM Hulls and Write All Hull Images to File */ /********************************************************************************************/ puts("\nPerforming Hull Thresholding and Writing Hull Images to Disk..."); if( 
SC_ON ) { cudaMemcpy(SC_image_h, SC_image_d, MEM_SIZE_IMAGE_BOOL, cudaMemcpyDeviceToHost); write_bool_array_to_file("x_sc", output_directory, output_folder, SC_image_h, COLUMNS, ROWS, SLICES ); } if( MSC_ON ) MSC_threshold(); if( SM_ON ) SM_threshold(); exit(1); /********************************************************************************************/ /* Calculate the Mean WEPL, Relative ut-Angle, and Relative uv-Angle for Each Bin and Count */ /* the Number of Histories in Each Bin */ ///********************************************************************************************/ puts("Calculating the Mean for Each Bin Before Cuts..."); calculate_means(); /********************************************************************************************/ /* Calculate the Standard Deviation in WEPL, Relative ut-Angle, and Relative uv-Angle for */ /* Each Bin. Iterate Through the Valid History Vectors One Chunk at a Time, With at Most */ /* MAX_GPU_HISTORIES Per Chunk, and Calculate the Difference Between the Mean WEPL and WEPL,*/ /* Mean Relative ut-Angle and Relative ut-Angle, and Mean Relative uv-Angle and Relative */ /* uv-Angle for Each History. The Standard Deviation is then Found By Calculating the Sum */ /* of these Differences for Each Bin and Dividing it by the Number of Histories in the Bin */ /********************************************************************************************/ puts("Summing up the Difference Between Individual Measurements and the Mean for Each Bin..."); int remaining_histories = recon_vol_histories; int start_position = 0; while( remaining_histories > 0 ) { if( remaining_histories > MAX_GPU_HISTORIES ) histories_to_process = MAX_GPU_HISTORIES; else histories_to_process = remaining_histories; sum_differences( start_position, histories_to_process ); remaining_histories -= MAX_GPU_HISTORIES; start_position += MAX_GPU_HISTORIES; } puts("Calculating Standard Deviations for Each Bin..."); calculate_std_devs(); /********************************************************************************************/ /* Allocate Memory for the Sinogram on the Host, Initialize it to Zeros, Allocate Memory */ /* for it on the GPU, then Transfer the Initialized Sinogram to the GPU */ /********************************************************************************************/ initialize_sinogram(); /********************************************************************************************/ /* Iterate Through the Valid History Vectors One Chunk at a Time, With at Most */ /* MAX_GPU_HISTORIES Per Chunk, and Perform Statistical Cuts */ /********************************************************************************************/ puts("Performing Statistical Cuts..."); remaining_histories = recon_vol_histories, start_position = 0; while( remaining_histories > 0 ) { if( remaining_histories > MAX_GPU_HISTORIES ) histories_to_process = MAX_GPU_HISTORIES; else histories_to_process = remaining_histories; statistical_cuts( start_position, histories_to_process ); remaining_histories -= MAX_GPU_HISTORIES; start_position += MAX_GPU_HISTORIES; } printf("%d out of %d histories passed cuts\n", post_cut_histories, total_histories ); /********************************************************************************************/ /* Free the host memory for the bin number array and gpu memory for the statistics arrays */ /* and shrink the vectors to fit exactly the number of histories that passed cuts */ 
/********************************************************************************************/ puts("Freeing unnecessary memory and shrinking vectors to just fit remaining histories..."); post_cut_memory_clean(); resize_vectors( post_cut_histories ); shrink_vectors( post_cut_histories ); /********************************************************************************************/ /* Recalculate the Mean WEPL for Each Bin Using the Histories Remaining After Cuts and Use */ /* these to Produce the Sinogram */ ///********************************************************************************************/ puts("Calculating the Elements of the Sinogram..."); construct_sinogram(); /********************************************************************************************/ /* Perform Filtered Backprojection and Write FBP Hull to Disk */ /********************************************************************************************/ if( FBP_ON ) { filter(); backprojection(); } /********************************************************************************************/ /* End Program Execution Timing Clock and Print the Total Execution Time to Console Window */ /********************************************************************************************/ //end = clock(); //printf("Total execution time : %3f\n",(double)(end-start)/1000); /********************************************************************************************/ /* Program Has Finished Execution. Require the User to Hit the Enter Key to Terminate the */ /* Program and Close the Terminal/Console Window */ /********************************************************************************************/ puts("Preprocessing complete. Press any key to close the console window..."); fgets(user_response, sizeof(user_response), stdin); } /************************************************************************************************************************************************************/ /******************************************************** Preprocessing Setup and Initializations ***********************************************************/ /************************************************************************************************************************************************************/ void assign_SSD_positions() //HERE THE COORDINATES OF THE DETECTORS PLANES ARE LOADED, THE CONFIG FILE IS CREATED BY FORD (RWS) { char user_response[20]; char configFilename[512]; sprintf(configFilename, "%s%s\\scan.cfg", input_directory, input_folder); if( DEBUG_TEXT_ON ) printf("Opening config file %s...\n", configFilename); ifstream configFile(configFilename); if( !configFile.is_open() ) { printf("ERROR: config file not found at %s!\n", configFilename); fputs("Didn't Find File", stdout); fflush(stdout); printf("text = \"%s\"\n", user_response); fgets(user_response, sizeof(user_response), stdin); exit(1); } else { fputs("Found File", stdout); fflush(stdout); printf("user_response = \"%s\"\n", user_response); } if( DEBUG_TEXT_ON ) puts("Reading Tracking Plane Positions..."); for( int i = 0; i < 8; i++ ) { configFile >> SSD_u_Positions[i]; if( DEBUG_TEXT_ON ) printf("SSD_u_Positions[%d] = %3f", i, SSD_u_Positions[i]); } configFile.close(); } void initializations() { for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ ) histories_per_scan[scan_number] = 0; histories_per_file = (int*) calloc( NUM_SCANS * GANTRY_ANGLES, sizeof(int) ); histories_per_gantry_angle = (int*) calloc( GANTRY_ANGLES, sizeof(int) ); 
recon_vol_histories_per_projection = (int*) calloc( GANTRY_ANGLES, sizeof(int) ); bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) ); mean_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) ); mean_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); mean_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); stddev_rel_ut_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); stddev_rel_uv_angle_h = (float*) calloc( NUM_BINS, sizeof(float) ); stddev_WEPL_h = (float*) calloc( NUM_BINS, sizeof(float) ); cudaMalloc((void**) &bin_counts_d, MEM_SIZE_BINS_INTS ); cudaMalloc((void**) &mean_WEPL_d, MEM_SIZE_BINS_FLOATS ); cudaMalloc((void**) &mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS ); cudaMalloc((void**) &mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS ); cudaMalloc((void**) &stddev_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS ); cudaMalloc((void**) &stddev_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS ); cudaMalloc((void**) &stddev_WEPL_d, MEM_SIZE_BINS_FLOATS ); cudaMemcpy( bin_counts_d, bin_counts_h, MEM_SIZE_BINS_INTS, cudaMemcpyHostToDevice ); cudaMemcpy( mean_WEPL_d, mean_WEPL_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); cudaMemcpy( mean_rel_ut_angle_d, mean_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); cudaMemcpy( mean_rel_uv_angle_d, mean_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); cudaMemcpy( stddev_rel_ut_angle_d, stddev_rel_ut_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); cudaMemcpy( stddev_rel_uv_angle_d, stddev_rel_uv_angle_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); cudaMemcpy( stddev_WEPL_d, stddev_WEPL_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); } void count_histories_old() { if( DEBUG_TEXT_ON ) printf("Counting histories...\n"); char user_response[20]; char data_filename[128]; int file_size, num_histories, file_number = 0, gantry_position_number = 0; for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ ) { for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ ) { sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension ); //printf("Name = %s", data_filename ); FILE *data_file = fopen(data_filename, "rb"); if( data_file == NULL ) { fputs( "Error Opening Data File: Check that the directories are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } fseek( data_file, 0, SEEK_END ); file_size = ftell( data_file ); if( BINARY_ENCODING ) { if( file_size % BYTES_PER_HISTORY ) { printf("ERROR! 
Problem with bytes_per_history!\n"); fgets(user_response, sizeof(user_response), stdin); exit(2); } num_histories = file_size / BYTES_PER_HISTORY; } else num_histories = file_size; fclose(data_file); histories_per_file[file_number] = num_histories; histories_per_gantry_angle[gantry_position_number] += num_histories; histories_per_scan[scan_number-1] += num_histories; total_histories += num_histories; if( DEBUG_TEXT_ON ) printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number); } } if( DEBUG_TEXT_ON ) { for( int file_number = 0, int gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ ) { if( file_number % NUM_SCANS == 0 ) printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) ); printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 ); } for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ ) printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1); printf("There are a Total of %d Histories\n", total_histories); } } void count_histories_v0() { if( DEBUG_TEXT_ON ) puts("Counting histories...\n"); char user_response[20]; char data_filename[256]; int file_size, num_histories, file_number = 0, gantry_position_number = 0; for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ ) { for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ ) { sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); ifstream data_file(data_filename, ios::binary); if( data_file == NULL ) { fputs( "File not found: Check that the directories and files are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } char magic_number[5]; data_file.read(magic_number, 4); magic_number[4] = '\0'; if( strcmp(magic_number, "PCTD") ) { puts("Error: unknown file type (should be PCTD)!\n"); fgets(user_response, sizeof(user_response), stdin); exit(1); } int version_id; data_file.read((char*)&version_id, sizeof(int)); if( version_id == 0 ) { int num_histories; data_file.read((char*)&num_histories, sizeof(int)); data_file.close(); histories_per_file[file_number] = num_histories; histories_per_gantry_angle[gantry_position_number] += num_histories; histories_per_scan[scan_number-1] += num_histories; total_histories += num_histories; if( DEBUG_TEXT_ON ) printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number); } else { printf("ERROR: Unsupported format version (%d)!\n", version_id); fgets(user_response, sizeof(user_response), stdin); exit(1); } } } if( DEBUG_TEXT_ON ) { for( int file_number = 0, int gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ ) { if( file_number % NUM_SCANS == 0 ) printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) ); printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 ); } for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ ) printf("There are a Total of %d Histories in Scan 
Number %d \n", histories_per_scan[scan_number], scan_number + 1); printf("There are a Total of %d Histories\n", total_histories); } // The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. As we iterate // through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a // valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin // number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many // valid histories there will be, so memory is allocated to accomodate every history and the actual number of valid histories // are counted. Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw // data and recalculating it every time its needed. Once all the data is processed and we know how many valid histories we // have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to // and from the GPU. } void count_histories_v1() { if( DEBUG_TEXT_ON ) printf("Counting histories...\n"); char user_response[20]; char data_filename[128]; int file_size, num_histories, file_number = 0, gantry_position_number = 0; for( int gantry_angle = 0; gantry_angle < 360; gantry_angle += GANTRY_ANGLE_INTERVAL, gantry_position_number++ ) { for( int scan_number = 1; scan_number <= NUM_SCANS; scan_number++, file_number++ ) { sprintf(data_filename, "%s%s/%s_%03d%%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); FILE *data_file = fopen(data_filename, "rb"); if( data_file == NULL ) { fputs( "Error Opening Data File: Check that the directories are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } fseek( data_file, 0, SEEK_END ); file_size = ftell( data_file ); if( BINARY_ENCODING ) { if( file_size % BYTES_PER_HISTORY ) { printf("ERROR! Problem with bytes_per_history!\n"); fgets(user_response, sizeof(user_response), stdin); exit(2); } num_histories = file_size / BYTES_PER_HISTORY; } else num_histories = file_size; fclose(data_file); histories_per_file[file_number] = num_histories; histories_per_gantry_angle[gantry_position_number] += num_histories; histories_per_scan[scan_number-1] += num_histories; total_histories += num_histories; if( DEBUG_TEXT_ON ) printf("There are %d Histories for Gantry Angle %d From Scan Number %d\n",num_histories, gantry_angle, scan_number); } } if( DEBUG_TEXT_ON ) { for( int file_number = 0, int gantry_position_number = 0; file_number < (NUM_SCANS * GANTRY_ANGLES); file_number++, gantry_position_number++ ) { if( file_number % NUM_SCANS == 0 ) printf("There are a Total of %d Histories From Gantry Angle %d\n", histories_per_gantry_angle[gantry_position_number], int(gantry_position_number* GANTRY_ANGLE_INTERVAL) ); printf("* %d Histories are From Scan Number %d\n", histories_per_file[file_number], (file_number % NUM_SCANS) + 1 ); } for( int scan_number = 0; scan_number < NUM_SCANS; scan_number++ ) printf("There are a Total of %d Histories in Scan Number %d \n", histories_per_scan[scan_number], scan_number + 1); printf("There are a Total of %d Histories\n", total_histories); } // The GPU cannot process all the histories at once, so they are broken up into chunks that can fit on the GPU. 
As we iterate // through the data one chunk at a time, we determine which histories enter the reconstruction volume and if they belong to a // valid bin (i.e. t, v, and angular bin number is greater than zero and less than max). If both are true, we append the bin // number, WEPL, and relative entry/exit ut/uv angles to the following four arrays. We do not know ahead of time how many // valid histories there will be, so memory is allocated to accomodate every history and the actual number of valid histories // are counted. Although we waste some host memory, we can avoid writing intermediate information to file or keeping the raw // data and recalculating it every time its needed. Once all the data is processed and we know how many valid histories we // have, we simply ignore the illegitimate elements of the four arrays to avoid transferring invalid and unnecessary data to // and from the GPU. } void reserve_vector_capacity() { bin_num_vector.reserve( total_histories ); //gantry_angle_vector.reserve( total_histories ); WEPL_vector.reserve( total_histories ); x_entry_vector.reserve( total_histories ); y_entry_vector.reserve( total_histories ); z_entry_vector.reserve( total_histories ); x_exit_vector.reserve( total_histories ); y_exit_vector.reserve( total_histories ); z_exit_vector.reserve( total_histories ); xy_entry_angle_vector.reserve( total_histories ); xz_entry_angle_vector.reserve( total_histories ); //xy_exit_angle_vector.reserve( total_histories ); //xz_exit_angle_vector.reserve( total_histories ); relative_ut_angle_vector.reserve( total_histories ); relative_uv_angle_vector.reserve( total_histories ); } /************************************************************************************************************************************************************/ /********************************************************* Data Importation, Initial Cuts, and Binning ******************************************************/ /************************************************************************************************************************************************************/ void iterative_data_read_old( int num_histories, int start_file_num, int end_file_num ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; t_in_1_h = (float*) malloc(mem_size_hist_floats); t_in_2_h = (float*) malloc(mem_size_hist_floats); t_out_1_h = (float*) malloc(mem_size_hist_floats); t_out_2_h = (float*) malloc(mem_size_hist_floats); u_in_1_h = (float*) malloc(mem_size_hist_floats); u_in_2_h = (float*) malloc(mem_size_hist_floats); u_out_1_h = (float*) malloc(mem_size_hist_floats); u_out_2_h = (float*) malloc(mem_size_hist_floats); v_in_1_h = (float*) malloc(mem_size_hist_floats); v_in_2_h = (float*) malloc(mem_size_hist_floats); v_out_1_h = (float*) malloc(mem_size_hist_floats); v_out_2_h = (float*) malloc(mem_size_hist_floats); WEPL_h = (float*) malloc(mem_size_hist_floats); gantry_angle_h = (int*) malloc(mem_size_hist_ints); int array_index = 0, gantry_position, gantry_angle, scan_number, scan_histories; float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data; char tracker_plane[4]; char data_filename[128]; FILE* data_file; for( int file_num = start_file_num; file_num <= end_file_num; file_num++ ) { gantry_position = file_num / NUM_SCANS; gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL; scan_number = file_num % NUM_SCANS + 1; scan_histories = histories_per_file[file_num]; printf("Reading File for Gantry 
Angle %d from Scan Number %d...\n", gantry_angle, scan_number ); sprintf( data_filename, "%s%s/%s_trans%d_%03d%s", input_directory, input_folder, input_base_name, scan_number, gantry_angle, file_extension ); data_file = fopen( data_filename, "rb" ); for( int history = 0; history < scan_histories; history++, array_index++ ) { fread(&v_data, sizeof(float), 4, data_file); fread(&t_data, sizeof(float), 4, data_file); fread(&tracker_plane, sizeof(char), 4, data_file); fread(&WEPL_data, sizeof(float), 1, data_file); fread(&gantry_angle_data, sizeof(float), 1, data_file); fread(&dummy_data, sizeof(float), 1, data_file); // dummy read because each event has an extra 4 bytes, for some reason if( DATA_IN_MM ) { // Convert the input data from mm to cm v_in_1_h[array_index] = v_data[0] * 0.1; v_in_2_h[array_index] = v_data[1] * 0.1; v_out_1_h[array_index] = v_data[2] * 0.1; v_out_2_h[array_index] = v_data[3] * 0.1; t_in_1_h[array_index] = t_data[0] * 0.1; t_in_2_h[array_index] = t_data[1] * 0.1; t_out_1_h[array_index] = t_data[2] * 0.1; t_out_2_h[array_index] = t_data[3] * 0.1; WEPL_h[array_index] = WEPL_data * 0.1; } else { v_in_1_h[array_index] = v_data[0]; v_in_2_h[array_index] = v_data[1]; v_out_1_h[array_index] = v_data[2]; v_out_2_h[array_index] = v_data[3]; t_in_1_h[array_index] = t_data[0]; t_in_2_h[array_index] = t_data[1]; t_out_1_h[array_index] = t_data[2]; t_out_2_h[array_index] = t_data[3]; WEPL_h[array_index] = WEPL_data; } if( !MICAH_SIM ) { u_in_1_h[array_index] = SSD_u_Positions[int(tracker_plane[0])]; u_in_2_h[array_index] = SSD_u_Positions[int(tracker_plane[1])]; u_out_1_h[array_index] = SSD_u_Positions[int(tracker_plane[2])]; u_out_2_h[array_index] = SSD_u_Positions[int(tracker_plane[3])]; } else { u_in_1_h[array_index] = SSD_u_Positions[0]; u_in_2_h[array_index] = SSD_u_Positions[2]; u_out_1_h[array_index] = SSD_u_Positions[4]; u_out_2_h[array_index] = SSD_u_Positions[6]; } if( SSD_IN_MM ) { // Convert the tracking plane positions from mm to cm u_in_1_h[array_index] *= 0.1; u_in_2_h[array_index] *= 0.1; u_out_1_h[array_index] *= 0.1; u_out_2_h[array_index] *= 0.1; } gantry_angle_h[array_index] = int(gantry_angle_data); } fclose(data_file); } } void iterative_data_read_v0( int num_histories, int start_file_num, int end_file_num ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; t_in_1_h = (float*) malloc(mem_size_hist_floats); t_in_2_h = (float*) malloc(mem_size_hist_floats); t_out_1_h = (float*) malloc(mem_size_hist_floats); t_out_2_h = (float*) malloc(mem_size_hist_floats); u_in_1_h = (float*) malloc(mem_size_hist_floats); u_in_2_h = (float*) malloc(mem_size_hist_floats); u_out_1_h = (float*) malloc(mem_size_hist_floats); u_out_2_h = (float*) malloc(mem_size_hist_floats); v_in_1_h = (float*) malloc(mem_size_hist_floats); v_in_2_h = (float*) malloc(mem_size_hist_floats); v_out_1_h = (float*) malloc(mem_size_hist_floats); v_out_2_h = (float*) malloc(mem_size_hist_floats); WEPL_h = (float*) malloc(mem_size_hist_floats); gantry_angle_h = (int*) malloc(mem_size_hist_ints); /* Contains the following headers: Magic number identifier: "PCTD" (4-byte string) Format version identifier (integer) Number of events in file (integer) Projection angle (float | degrees) Beam energy (float | MeV) Acquisition/generation date (integer | Unix time) Pre-process date (integer | Unix time) Phantom name or description (variable length string) Data source (variable length string) Prepared by (variable length string) * Note 
on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string. Event data: Data is be stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory. Detector coordinates in mm relative to a phantom center, given in the detector coordinate system: t0 (float * N) t1 (float * N) t2 (float * N) t3 (float * N) v0 (float * N) v1 (float * N) v2 (float * N) v3 (float * N) u0 (float * N) u1 (float * N) u2 (float * N) u3 (float * N) WEPL in mm (float * N) */ char user_response[20]; char data_filename[128]; int array_index = 0; float min_WEPL = 20, max_WEPL = -20; for( int file_num = start_file_num; file_num <= end_file_num; file_num++ ) { int gantry_position = file_num / NUM_SCANS; int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL; int scan_number = file_num % NUM_SCANS + 1; int scan_histories = histories_per_file[file_num]; printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number ); sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); ifstream data_file(data_filename, ios::binary); if( data_file == NULL ) { fputs( "File not found: Check that the directories and files are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } char magic_number[5]; data_file.read(magic_number, 4); magic_number[4] = '\0'; if( strcmp(magic_number, "PCTD") ) { puts("Error: unknown file type (should be PCTD)!\n"); fgets(user_response, sizeof(user_response), stdin); exit(1); } int version_id; data_file.read((char*)&version_id, sizeof(int)); if( version_id == 0 ) { int num_histories; data_file.read((char*)&num_histories, sizeof(int)); puts("Reading headers from file...\n"); float projection_angle, beam_energy; int generation_date, preprocess_date; int phantom_name_size, data_source_size, prepared_by_size; char *phantom_name, *data_source, *prepared_by; data_file.read((char*)&projection_angle, sizeof(float)); data_file.read((char*)&beam_energy, sizeof(float)); data_file.read((char*)&generation_date, sizeof(int)); data_file.read((char*)&preprocess_date, sizeof(int)); data_file.read((char*)&phantom_name_size, sizeof(int)); phantom_name = (char*)malloc(phantom_name_size); data_file.read(phantom_name, phantom_name_size); data_file.read((char*)&data_source_size, sizeof(int)); data_source = (char*)malloc(data_source_size); data_file.read(data_source, data_source_size); data_file.read((char*)&prepared_by_size, sizeof(int)); prepared_by = (char*)malloc(prepared_by_size); data_file.read(prepared_by, prepared_by_size); printf("Loading %d histories from file\n", num_histories); int data_size = num_histories * sizeof(float); data_file.read((char*)t_in_1_h, data_size); data_file.read((char*)t_in_2_h, data_size); data_file.read((char*)t_out_1_h, data_size); data_file.read((char*)t_out_2_h, data_size); data_file.read((char*)v_in_1_h, data_size); data_file.read((char*)v_in_2_h, data_size); data_file.read((char*)v_out_1_h, data_size); data_file.read((char*)v_out_2_h, data_size); data_file.read((char*)u_in_1_h, data_size); data_file.read((char*)u_in_2_h, data_size); data_file.read((char*)u_out_1_h, data_size); data_file.read((char*)u_out_2_h, data_size); data_file.read((char*)WEPL_h, data_size); //float v_data[4], t_data[4], WEPL_data, gantry_angle_data, 
dummy_data; for( int i = 0; i < num_histories; i++ ) { if( DATA_IN_MM ) { // Convert the input data from mm to cm v_in_1_h[i] *= 0.1; v_in_2_h[i] *= 0.1; v_out_1_h[i] *= 0.1; v_out_2_h[i] *= 0.1; t_in_1_h[i] *= 0.1; t_in_2_h[i] *= 0.1; t_out_1_h[i] *= 0.1; t_out_2_h[i] *= 0.1; WEPL_h[i] *= 0.1; if( WEPL_h[i] < 0 ) printf("WEPL[%d] = %3f\n", i, WEPL_h[i] ); u_in_1_h[i] *= 0.1; u_in_2_h[i] *= 0.1; u_out_1_h[i] *= 0.1; u_out_2_h[i] *= 0.1; } gantry_angle_h[i] = int(projection_angle); } data_file.close(); } } } void iterative_data_read_v1( int num_histories, int start_file_num, int end_file_num ){ unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; t_in_1_h = (float*) malloc(mem_size_hist_floats); t_in_2_h = (float*) malloc(mem_size_hist_floats); t_out_1_h = (float*) malloc(mem_size_hist_floats); t_out_2_h = (float*) malloc(mem_size_hist_floats); u_in_1_h = (float*) malloc(mem_size_hist_floats); u_in_2_h = (float*) malloc(mem_size_hist_floats); u_out_1_h = (float*) malloc(mem_size_hist_floats); u_out_2_h = (float*) malloc(mem_size_hist_floats); v_in_1_h = (float*) malloc(mem_size_hist_floats); v_in_2_h = (float*) malloc(mem_size_hist_floats); v_out_1_h = (float*) malloc(mem_size_hist_floats); v_out_2_h = (float*) malloc(mem_size_hist_floats); WEPL_h = (float*) malloc(mem_size_hist_floats); gantry_angle_h = (int*) malloc(mem_size_hist_ints); /* Contains the following headers: Magic number identifier: "PCTD" (4-byte string) Format version identifier (integer) Number of events in file (integer) Projection angle (float | degrees) Beam energy (float | MeV) Acquisition/generation date (integer | Unix time) Pre-process date (integer | Unix time) Phantom name or description (variable length string) Data source (variable length string) Prepared by (variable length string) * Note on variable length strings: each variable length string should be preceded with an integer containing the number of characters in the string. Event data: Data is be stored with all of one type in a consecutive row, meaning the first entries will be N t0 values, where N is the number of events in the file. Next will be N t1 values, etc. This more closely matches the data structure in memory. 
Detector coordinates in mm relative to a phantom center, given in the detector coordinate system: t0 (float * N) t1 (float * N) t2 (float * N) t3 (float * N) v0 (float * N) v1 (float * N) v2 (float * N) v3 (float * N) u0 (float * N) u1 (float * N) u2 (float * N) u3 (float * N) WEPL in mm (float * N) */ char user_response[20]; char data_filename[128]; int array_index = 0; float min_WEPL = 20, max_WEPL = -20; for( int file_num = start_file_num; file_num <= end_file_num; file_num++ ) { int gantry_position = file_num / NUM_SCANS; int gantry_angle = gantry_position * GANTRY_ANGLE_INTERVAL; int scan_number = file_num % NUM_SCANS + 1; int scan_histories = histories_per_file[file_num]; printf("Reading File for Gantry Angle %d from Scan Number %d...\n", gantry_angle, scan_number ); sprintf(data_filename, "%s%s/%s_%03d%s", input_directory, input_folder, input_base_name, gantry_angle, file_extension ); ifstream data_file(data_filename, ios::binary); if( data_file == NULL ) { fputs( "File not found: Check that the directories and files are properly named.", stderr ); fgets(user_response, sizeof(user_response), stdin); exit(1); } char magic_number[5]; data_file.read(magic_number, 4); magic_number[4] = '\0'; if( strcmp(magic_number, "PCTD") ) { puts("Error: unknown file type (should be PCTD)!\n"); fgets(user_response, sizeof(user_response), stdin); exit(1); } int version_id; data_file.read((char*)&version_id, sizeof(int)); if( version_id == 0 ) { int num_histories; data_file.read((char*)&num_histories, sizeof(int)); puts("Reading headers from file...\n"); float projection_angle, beam_energy; int generation_date, preprocess_date; int phantom_name_size, data_source_size, prepared_by_size; char *phantom_name, *data_source, *prepared_by; data_file.read((char*)&projection_angle, sizeof(float)); data_file.read((char*)&beam_energy, sizeof(float)); data_file.read((char*)&generation_date, sizeof(int)); data_file.read((char*)&preprocess_date, sizeof(int)); data_file.read((char*)&phantom_name_size, sizeof(int)); phantom_name = (char*)malloc(phantom_name_size); data_file.read(phantom_name, phantom_name_size); data_file.read((char*)&data_source_size, sizeof(int)); data_source = (char*)malloc(data_source_size); data_file.read(data_source, data_source_size); data_file.read((char*)&prepared_by_size, sizeof(int)); prepared_by = (char*)malloc(prepared_by_size); data_file.read(prepared_by, prepared_by_size); printf("Loading %d histories from file\n", num_histories); int data_size = num_histories * sizeof(float); data_file.read((char*)t_in_1_h, data_size); data_file.read((char*)t_in_2_h, data_size); data_file.read((char*)t_out_1_h, data_size); data_file.read((char*)t_out_2_h, data_size); data_file.read((char*)v_in_1_h, data_size); data_file.read((char*)v_in_2_h, data_size); data_file.read((char*)v_out_1_h, data_size); data_file.read((char*)v_out_2_h, data_size); data_file.read((char*)u_in_1_h, data_size); data_file.read((char*)u_in_2_h, data_size); data_file.read((char*)u_out_1_h, data_size); data_file.read((char*)u_out_2_h, data_size); data_file.read((char*)WEPL_h, data_size); //float v_data[4], t_data[4], WEPL_data, gantry_angle_data, dummy_data; for( int i = 0; i < num_histories; i++ ) { if( DATA_IN_MM ) { // Convert the input data from mm to cm v_in_1_h[i] *= 0.1; v_in_2_h[i] *= 0.1; v_out_1_h[i] *= 0.1; v_out_2_h[i] *= 0.1; t_in_1_h[i] *= 0.1; t_in_2_h[i] *= 0.1; t_out_1_h[i] *= 0.1; t_out_2_h[i] *= 0.1; WEPL_h[i] *= 0.1; if( WEPL_h[i] < 0 ) printf("WEPL[%d] = %3f\n", i, WEPL_h[i] ); u_in_1_h[i] *= 0.1; u_in_2_h[i] *= 
0.1; u_out_1_h[i] *= 0.1; u_out_2_h[i] *= 0.1; } gantry_angle_h[i] = int(projection_angle); } data_file.close(); } } } void recon_volume_intersections( int num_histories ) { //printf("There are %d histories in this projection\n", num_histories ); unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; unsigned int mem_size_hist_bool = sizeof(bool) * num_histories; // Allocate GPU memory cudaMalloc((void**) &t_in_1_d, mem_size_hist_floats); cudaMalloc((void**) &t_in_2_d, mem_size_hist_floats); cudaMalloc((void**) &t_out_1_d, mem_size_hist_floats); cudaMalloc((void**) &t_out_2_d, mem_size_hist_floats); cudaMalloc((void**) &u_in_1_d, mem_size_hist_floats); cudaMalloc((void**) &u_in_2_d, mem_size_hist_floats); cudaMalloc((void**) &u_out_1_d, mem_size_hist_floats); cudaMalloc((void**) &u_out_2_d, mem_size_hist_floats); cudaMalloc((void**) &v_in_1_d, mem_size_hist_floats); cudaMalloc((void**) &v_in_2_d, mem_size_hist_floats); cudaMalloc((void**) &v_out_1_d, mem_size_hist_floats); cudaMalloc((void**) &v_out_2_d, mem_size_hist_floats); cudaMalloc((void**) &WEPL_d, mem_size_hist_floats); cudaMalloc((void**) &gantry_angle_d, mem_size_hist_ints); cudaMalloc((void**) &x_entry_d, mem_size_hist_floats); cudaMalloc((void**) &y_entry_d, mem_size_hist_floats); cudaMalloc((void**) &z_entry_d, mem_size_hist_floats); cudaMalloc((void**) &x_exit_d, mem_size_hist_floats); cudaMalloc((void**) &y_exit_d, mem_size_hist_floats); cudaMalloc((void**) &z_exit_d, mem_size_hist_floats); cudaMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats); cudaMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats); cudaMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats); cudaMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats); cudaMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats); cudaMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats); cudaMalloc((void**) &traversed_recon_volume_d, mem_size_hist_bool); cudaMemcpy(t_in_1_d, t_in_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(t_in_2_d, t_in_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(t_out_1_d, t_out_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(t_out_2_d, t_out_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(u_in_1_d, u_in_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(u_in_2_d, u_in_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(u_out_1_d, u_out_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(u_out_2_d, u_out_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(v_in_1_d, v_in_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(v_in_2_d, v_in_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(v_out_1_d, v_out_1_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(v_out_2_d, v_out_2_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; cudaMemcpy(gantry_angle_d, gantry_angle_h, mem_size_hist_ints, cudaMemcpyHostToDevice) ; cudaMemcpy(WEPL_d, WEPL_h, mem_size_hist_floats, cudaMemcpyHostToDevice) ; dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); recon_volume_intersections_kernel<<<dimGrid, dimBlock>>> ( num_histories, gantry_angle_d, traversed_recon_volume_d, WEPL_d, t_in_1_d, t_in_2_d, t_out_1_d, t_out_2_d, u_in_1_d, u_in_2_d, u_out_1_d, u_out_2_d, v_in_1_d, v_in_2_d, v_out_1_d, v_out_2_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d, xy_entry_angle_d, 
xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d, relative_ut_angle_d, relative_uv_angle_d ); free(t_in_1_h); free(t_in_2_h); free(v_in_1_h); free(v_in_2_h); free(u_in_1_h); free(u_in_2_h); free(t_out_1_h); free(t_out_2_h); free(v_out_1_h); free(v_out_2_h); free(u_out_1_h); free(u_out_2_h); cudaFree(t_in_1_d); cudaFree(t_in_2_d); cudaFree(v_in_1_d); cudaFree(v_in_2_d); cudaFree(u_in_1_d); cudaFree(u_in_2_d); cudaFree(t_out_1_d); cudaFree(t_out_2_d); cudaFree(v_out_1_d); cudaFree(v_out_2_d); cudaFree(u_out_1_d); cudaFree(u_out_2_d); cudaFree(gantry_angle_d); } __global__ void recon_volume_intersections_kernel ( int num_histories, int* gantry_angle, bool* traversed_recon_volume, float* WEPL, float* t_in_1, float* t_in_2, float* t_out_1, float* t_out_2, float* u_in_1, float* u_in_2, float* u_out_1, float* u_out_2, float* v_in_1, float* v_in_2, float* v_out_1, float* v_out_2, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* relative_ut_angle, float* relative_uv_angle ) { /* Determine if the proton path passes through the reconstruction volume (i.e. intersects the reconstruction cylinder twice) and if it does, determine the x, y, and z positions in the global/object coordinate system where the proton enters and exits the reconstruction volume. The origin of the object coordinate system is defined to be at the center of the reconstruction cylinder so that its volume is bounded by: -RECON_CYL_RADIUS <= x <= RECON_CYL_RADIUS -RECON_CYL_RADIUS <= y <= RECON_CYL_RADIUS -RECON_CYL_HEIGHT/2 <= z <= RECON_CYL_HEIGHT/2 First, the coordinates of the points where the proton path intersected the entry/exit detectors must be calculated. Since the detectors records data in the detector coordinate system, data in the utv coordinate system must be converted into the global/object coordinate system. The coordinate transformation can be accomplished using a rotation matrix with an angle of rotation determined by the angle between the two coordinate systems, which is the gantry_angle, in this case: Rotate ut-coordinate system to xy-coordinate system x = cos( gantry_angle ) * u - sin( gantry_angle ) * t y = sin( gantry_angle ) * u + cos( gantry_angle ) * t Rotate xy-coordinate system to ut-coordinate system u = cos( gantry_angle ) * x + sin( gantry_angle ) * y t = cos( gantry_angle ) * y - sin( gantry_angle ) * x If a proton passes through the reconstruction volume, then the line defining its path in the xy-plane will intersect the circle defining the boundary of the reconstruction cylinder in the xy-plane twice. We can determine if the proton path passes through the reconstruction volume by equating the equations of the proton path and the circle. This produces a second order polynomial which we must solve: f(x)_proton = f(x)_cylinder mx+b = sqrt(r^2 - x^2) m^2x^2 + 2mbx + b^2 = r^2 - x^2 (m^2 + 1)x^2 + 2mbx + (b^2 - r^2) = 0 ax^2 + bx + c = 0 => a = m^2 + 1 b = 2mb c = b^2 - r^2 We can solve this using the quadratic formula ([-b +/- sqrt(b^2-4ac)]/2a). If the proton passed through the reconstruction volume, then the determinant will be greater than zero ( b^2-4ac > 0 ) and the quadratic formula will return two unique points of intersection. 
The intersection point closest to where the proton entry/exit path intersects the entry/exit detector plane is calculated and The proton entry/exit path If the determinant <= 0, then the proton path does not go through the reconstruction volume and we need not determine intersection coordinates. Two points are returned by the quadratic formula for each reconstruction cylinder intersection, the coordinates closest to the point where the entry/exit path intersected the detector plane are determined If the exit/entry path travels through the cone bounded by y=|x| && y=-|x| the x_coordinates will be small and the difference between the entry and exit x-coordinates will approach zero, causing instabilities in trig functions and slope calculations ( x difference in denominator). To overcome these innaccurate calculations, coordinates for these proton paths will be rotated PI/2 radians(90 degrees) prior to calculations and rotated back when they are completed using a rotation matrix transformation again: Positive Rotation By 90 Degrees x' = cos( 90 ) * x - sin( 90 ) * y = -y y' = sin( 90 ) * x + cos( 90 ) * y = x Negative Rotation By 90 Degree x' = cos( 90 ) * x + sin( 90 ) * y = y y' = cos( 90 ) * y - sin( 90 ) * x = -x */ float a = 0, b = 0, c = 0; float x_intercept_1, x_intercept_2, y_intercept_1, y_intercept_2, squared_distance_1, squared_distance_2; float x_temp, y_temp; int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; float rotation_angle_radians = gantry_angle[i] * ANGLE_TO_RADIANS; traversed_recon_volume[i] = false; if( i < num_histories ) { /***************************************************************************************************************/ /**************************************** Check entry information **********************************************/ /***************************************************************************************************************/ // Determine if the proton path enters the reconstruction volume. The proton path is defined using the entry angle and // position where the proton intersected the entry SSD which is closest to the object. If this line projected onto the // xy plane intersects the reconstruction cylinder, the line will intersect the circle in the xy plane which describes the // boundary of the reconstruction cylinder twice and its entry elevation will be within the height of the cylinder. // Relevant angles in radians: gantry angle, proton path entry angle in ut and xy planes. 
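	// NOTE (illustrative sketch only): the detector-to-object rotation described above, factored into a
	// hypothetical device helper. The inline code that follows performs exactly this computation on
	// (u_in_2, t_in_2) for entry and (u_out_1, t_out_1) for exit; the helper is shown only to make the
	// rotation explicit.
#if 0
	__device__ void ut_to_xy( float u, float t, float gantry_angle_radians, float& x, float& y )
	{
		// Rotate the ut-coordinate system into the xy (object) coordinate system:
		//   x = cos( gantry_angle ) * u - sin( gantry_angle ) * t
		//   y = sin( gantry_angle ) * u + cos( gantry_angle ) * t
		// e.g. at gantry_angle = 0 the axes coincide, so (u, t) maps to (x, y) unchanged.
		x = cosf( gantry_angle_radians ) * u - sinf( gantry_angle_radians ) * t;
		y = sinf( gantry_angle_radians ) * u + cosf( gantry_angle_radians ) * t;
	}
#endif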
float ut_entry_angle = atan2f( t_in_2[i] - t_in_1[i], u_in_2[i] - u_in_1[i] ); xy_entry_angle[i] = ut_entry_angle + rotation_angle_radians; if( xy_entry_angle[i] < 0 ) xy_entry_angle[i] += TWO_PI; // Rotate entry detector positions float x_in = ( cosf( rotation_angle_radians ) * u_in_2[i] ) - ( sinf( rotation_angle_radians ) * t_in_2[i] ); float y_in = ( sinf( rotation_angle_radians ) * u_in_2[i] ) + ( cosf( rotation_angle_radians ) * t_in_2[i] ); // Determine if entry points should be rotated bool entry_in_cone = ( (xy_entry_angle[i] > PI_OVER_4) && (xy_entry_angle[i] < THREE_PI_OVER_4) ) || ( (xy_entry_angle[i] > FIVE_PI_OVER_4) && (xy_entry_angle[i] < SEVEN_PI_OVER_4) ); // Rotate x_in & y_in by 90 degrees, if necessary if( entry_in_cone ) { x_temp = x_in; y_temp = y_in; x_in = -y_temp; y_in = x_temp; xy_entry_angle[i] += PI_OVER_2; } float m_in = tanf( xy_entry_angle[i] ); // proton entry path slope float b_in = y_in - m_in * x_in; // proton entry path y-intercept // Quadratic formula coefficients a = 1 + pow(m_in, 2); // x^2 coefficient b = 2 * m_in * b_in; // x coefficient c = pow(b_in, 2) - pow(RECON_CYL_RADIUS, 2 ); // 1 coefficient float entry_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant bool entered = ( entry_discriminant > 0 ); // Proton path intersected twice // Find both intersection points of the circle; closest one to the entry SSDs is the entry position // Notice that x_intercept_2 = ( -b - sqrt(...) ) / ( 2 * a ) has the negative sign pulled out and following calculations modified as necessary // e.g. x_intercept_2 = -x_real_2 // y_intercept_2 = -y_real_2 // squared_distance_2 = sqd_real_2 since (x_intercept_2 + x_in)^2 = (-x_intercept_2 - x_in)^2 = (x_real_2 - x_in)^2 (same for y term) // This negation is also considered when assigning x_entry/y_entry using -x_intercept_2/y_intercept_2 *(TRUE/FALSE = 1/0) if( entered ) { x_intercept_1 = ( sqrtf(entry_discriminant) - b ) / ( 2 * a ); x_intercept_2 = ( sqrtf(entry_discriminant) + b ) / ( 2 * a ); y_intercept_1 = m_in * x_intercept_1 + b_in; y_intercept_2 = m_in * x_intercept_2 - b_in; squared_distance_1 = pow(x_intercept_1 - x_in, 2) + pow(y_intercept_1 - y_in, 2); squared_distance_2 = pow(x_intercept_2 + x_in, 2) + pow(y_intercept_2 + y_in, 2); x_entry[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2); y_entry[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2); } // Unrotate by 90 degrees, if necessary if( entry_in_cone ) { x_temp = x_entry[i]; y_temp = y_entry[i]; x_entry[i] = y_temp; y_entry[i] = -x_temp; xy_entry_angle[i] -= PI_OVER_2; } /***************************************************************************************************************/ /****************************************** Check exit information *********************************************/ /***************************************************************************************************************/ // Repeat the procedure above, this time to determine if the proton path exited the reconstruction volume and if so, the // x,y,z position where it exited float ut_exit_angle = atan2f( t_out_2[i] - t_out_1[i], u_out_2[i] - u_out_1[i] ); xy_exit_angle[i] = ut_exit_angle + rotation_angle_radians; if( xy_exit_angle[i] < 0 ) xy_exit_angle[i] += TWO_PI; // Rotate exit detector positions float x_out = ( cosf(rotation_angle_radians) * u_out_1[i] ) - ( sinf(rotation_angle_radians) * 
t_out_1[i] ); float y_out = ( sinf(rotation_angle_radians) * u_out_1[i] ) + ( cosf(rotation_angle_radians) * t_out_1[i] ); // Determine if exit points should be rotated bool exit_in_cone = ( (xy_exit_angle[i] > PI_OVER_4) && (xy_exit_angle[i] < THREE_PI_OVER_4) ) || ( (xy_exit_angle[i] > FIVE_PI_OVER_4) && (xy_exit_angle[i] < SEVEN_PI_OVER_4) ); // Rotate x_out & y_out by 90 degrees, if necessary if( exit_in_cone ) { x_temp = x_out; y_temp = y_out; x_out = -y_temp; y_out = x_temp; xy_exit_angle[i] += PI_OVER_2; } float m_out = tanf( xy_exit_angle[i] ); // proton entry path slope float b_out = y_out - m_out * x_out; // proton entry path y-intercept // Quadratic formula coefficients a = 1 + pow(m_out, 2); // x^2 coefficient b = 2 * m_out * b_out; // x coefficient c = pow(b_out, 2) - pow(RECON_CYL_RADIUS, 2); // 1 coefficient float exit_discriminant = pow(b, 2) - (4 * a * c); // Quadratic formula discriminant bool exited = ( exit_discriminant > 0 ); // Proton path intersected twice // Find both intersection points of the circle; closest one to the exit SSDs is the exit position if( exited ) { x_intercept_1 = ( sqrtf(exit_discriminant) - b ) / ( 2 * a ); x_intercept_2 = ( sqrtf(exit_discriminant) + b ) / ( 2 * a );// -x calculated y_intercept_1 = m_out * x_intercept_1 + b_out; y_intercept_2 = m_out * x_intercept_2 - b_out;// -y calculated squared_distance_1 = pow(x_intercept_1 - x_out, 2) + pow(y_intercept_1 - y_out, 2); squared_distance_2 = pow(x_intercept_2 + x_out, 2) + pow(y_intercept_2 + y_out, 2);// modified due to -x and -y calcs above x_exit[i] = x_intercept_1 * (squared_distance_1 <= squared_distance_2) - x_intercept_2 * (squared_distance_1 > squared_distance_2); y_exit[i] = y_intercept_1 * (squared_distance_1 <= squared_distance_2) - y_intercept_2 * (squared_distance_1 > squared_distance_2); } // Unrotate by 90 degrees, if necessary if( exit_in_cone ) { x_temp = x_exit[i]; y_temp = y_exit[i]; x_exit[i] = y_temp; y_exit[i] = -x_temp; xy_exit_angle[i] -= PI_OVER_2; } /***************************************************************************************************************/ /***************************************** Check z(v) direction ************************************************/ /***************************************************************************************************************/ // Relevant angles/slopes in radians for entry and exit in the uv plane float uv_entry_slope = ( v_in_2[i] - v_in_1[i] ) / ( u_in_2[i] - u_in_1[i] ); float uv_exit_slope = ( v_out_2[i] - v_out_1[i] ) / ( u_out_2[i] - u_out_1[i] ); float uv_entry_angle = atan2( v_in_2[i] - v_in_1[i], u_in_2[i] - u_in_1[i] ); float uv_exit_angle = atan2( v_out_2[i] - v_out_1[i], u_out_2[i] - u_out_1[i] ); xz_entry_angle[i] = uv_entry_angle; xz_exit_angle[i] = uv_exit_angle; if( xz_entry_angle[i] < 0 ) xz_entry_angle[i] += TWO_PI; if( xz_exit_angle[i] < 0 ) xz_exit_angle[i] += TWO_PI; // Calculate the u coordinate for the entry and exit points of the reconstruction volume and then use the uv slope calculated // from the detector entry and exit positions to determine the z position of the proton as it entered and exited the // reconstruction volume /* u-coordinate of the entry and exit points of the reconsruction cylinder can be found using an inverse rotation u = cos( gantry_angle ) * x + sin( gantry_angle ) * y */ float u_entry = ( cosf( rotation_angle_radians ) * x_entry[i] ) + ( sinf( rotation_angle_radians ) * y_entry[i] ); float u_exit = ( cosf(rotation_angle_radians) * x_exit[i] ) + ( 
sinf(rotation_angle_radians) * y_exit[i] ); z_entry[i] = v_in_2[i] + uv_entry_slope * ( u_entry - u_in_2[i] ); z_exit[i] = v_out_1[i] - uv_exit_slope * ( u_out_1[i] - u_exit ); // Even if the proton path intersected the circle describing the boundary of the cylinder twice, it may not have actually // passed through the reconstruction volume or may have only passed through part way. If |z_entry|> RECON_CYL_HEIGHT/2 , // then something off happened since the the source is around z=0 and we do not want to use this history. If the // |z_entry| < RECON_CYL_HEIGHT/2 and |z_exit| > RECON_CYL_HEIGHT/2 then we want to use the history but the x_exit and // y_exit positions need to be calculated again based on how far through the cylinder the proton passed before exiting it if( entered && exited ) { if( ( fabs(z_entry[i]) <= RECON_CYL_HEIGHT * 0.5 ) && ( fabs(z_exit[i]) > RECON_CYL_HEIGHT * 0.5 ) ) { float recon_cyl_fraction = fabs( ( ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5 - z_entry[i] ) / ( z_exit[i] - z_entry[i] ) ); x_exit[i] = x_entry[i] + recon_cyl_fraction * ( x_exit[i] - x_entry[i] ); y_exit[i] = y_entry[i] + recon_cyl_fraction * ( y_exit[i] - y_entry[i] ); z_exit[i] = ( (z_exit[i] >= 0) - (z_exit[i] < 0) ) * RECON_CYL_HEIGHT * 0.5; } else if( fabs(z_entry[i]) > RECON_CYL_HEIGHT * 0.5 ) { entered = false; exited = false; } // Check the measurement locations. Do not allow more than 5 cm difference in entry and exit in t and v. This gets // rid of spurious events. if( ( fabs(t_out_1[i] - t_in_2[i]) > 5 ) || ( fabs(v_out_1[i] - v_in_2[i]) > 5 ) ) { entered = false; exited = false; } } relative_ut_angle[i] = ut_exit_angle - ut_entry_angle; relative_uv_angle[i] = uv_exit_angle - uv_entry_angle; // Proton passed through the reconstruction volume only if it both entered and exited the reconstruction cylinder traversed_recon_volume[i] = entered && exited; } } void bin_valid_histories( int num_histories ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; unsigned int mem_size_hist_bool = sizeof(bool) * num_histories; traversed_recon_volume_h = (bool*) calloc( num_histories, sizeof(bool) ); bin_num_h = (int*) calloc( num_histories, sizeof(int) ); x_entry_h = (float*) calloc( num_histories, sizeof(float) ); y_entry_h = (float*) calloc( num_histories, sizeof(float) ); z_entry_h = (float*) calloc( num_histories, sizeof(float) ); x_exit_h = (float*) calloc( num_histories, sizeof(float) ); y_exit_h = (float*) calloc( num_histories, sizeof(float) ); z_exit_h = (float*) calloc( num_histories, sizeof(float) ); xy_entry_angle_h = (float*) calloc( num_histories, sizeof(float) ); xz_entry_angle_h = (float*) calloc( num_histories, sizeof(float) ); xy_exit_angle_h = (float*) calloc( num_histories, sizeof(float) ); xz_exit_angle_h = (float*) calloc( num_histories, sizeof(float) ); relative_ut_angle_h = (float*) calloc( num_histories, sizeof(float) ); relative_uv_angle_h = (float*) calloc( num_histories, sizeof(float) ); cudaMalloc((void**) &bin_num_d, mem_size_hist_ints ); cudaMemcpy( bin_num_d, bin_num_h, mem_size_hist_ints, cudaMemcpyHostToDevice ); dim3 dimBlock( THREADS_PER_BLOCK ); dim3 dimGrid( (int)( num_histories/THREADS_PER_BLOCK ) + 1 ); bin_valid_histories_kernel<<<dimGrid, dimBlock>>> ( num_histories, bin_counts_d, bin_num_d, traversed_recon_volume_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, WEPL_d, xy_entry_angle_d, 
xz_entry_angle_d, xy_exit_angle_d, xz_exit_angle_d, relative_ut_angle_d, relative_uv_angle_d ); cudaMemcpy( traversed_recon_volume_h, traversed_recon_volume_d, mem_size_hist_bool, cudaMemcpyDeviceToHost ); cudaMemcpy( bin_num_h, bin_num_d, mem_size_hist_ints, cudaMemcpyDeviceToHost ); cudaMemcpy( x_entry_h, x_entry_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( y_entry_h, y_entry_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( z_entry_h, z_entry_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( x_exit_h, x_exit_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( y_exit_h, y_exit_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( z_exit_h, z_exit_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( xy_entry_angle_h, xy_entry_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( xz_entry_angle_h, xz_entry_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( xy_exit_angle_h, xy_exit_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( xz_exit_angle_h, xz_exit_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( relative_ut_angle_h, relative_ut_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); cudaMemcpy( relative_uv_angle_h, relative_uv_angle_d, mem_size_hist_floats, cudaMemcpyDeviceToHost ); int offset = 0; for( int i = 0; i < num_histories; i++ ) { if( traversed_recon_volume_h[i] && ( bin_num_h[i] >= 0 ) ) { bin_num_vector.push_back( bin_num_h[i] ); //gantry_angle_vector.push_back( gantry_angle_h[i] ); WEPL_vector.push_back( WEPL_h[i] ); x_entry_vector.push_back( x_entry_h[i] ); y_entry_vector.push_back( y_entry_h[i] ); z_entry_vector.push_back( z_entry_h[i] ); x_exit_vector.push_back( x_exit_h[i] ); y_exit_vector.push_back( y_exit_h[i] ); z_exit_vector.push_back( z_exit_h[i] ); xy_entry_angle_vector.push_back( xy_entry_angle_h[i] ); xz_entry_angle_vector.push_back( xz_entry_angle_h[i] ); //xy_exit_angle_vector.push_back( xy_exit_angle_h[i] ); //xz_exit_angle_vector.push_back( xz_exit_angle_h[i] ); relative_ut_angle_vector.push_back( relative_ut_angle_h[i] ); relative_uv_angle_vector.push_back( relative_uv_angle_h[i] ); offset++; recon_vol_histories++; } } printf( "%d out of %d histories passed intersection cuts this iteration\n", offset, num_histories ); free( traversed_recon_volume_h ); free( bin_num_h ); free( x_entry_h ); free( y_entry_h ); free( z_entry_h ); free( x_exit_h ); free( y_exit_h ); free( z_exit_h ); free( xy_entry_angle_h ); free( xz_entry_angle_h ); free( xy_exit_angle_h ); free( xz_exit_angle_h ); free( relative_ut_angle_h ); free( relative_uv_angle_h ); //cudaFree( bin_num_d ); cudaFree( xy_entry_angle_d ); cudaFree( xz_entry_angle_d ); cudaFree( xy_exit_angle_d ); cudaFree( xz_exit_angle_d ); cudaFree( relative_ut_angle_d ); cudaFree( relative_uv_angle_d ); } __global__ void bin_valid_histories_kernel ( int num_histories, int* bin_counts, int* bin_num, bool* traversed_recon_volume, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* relative_ut_angle, float* relative_uv_angle ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( i < num_histories ) { if( traversed_recon_volume[i] ) { float x_midpath, y_midpath, z_midpath, path_angle; int angle_bin, t_bin, v_bin; float angle, t, v; x_midpath = ( 
x_entry[i] + x_exit[i] ) / 2; y_midpath = ( y_entry[i] + y_exit[i] ) / 2; z_midpath = ( z_entry[i] + z_exit[i] ) / 2; path_angle = atan2( ( y_exit[i] - y_entry[i] ) , ( x_exit[i] - x_entry[i] ) ); if( path_angle < 0 ) path_angle += 2*PI; angle_bin = int( ( path_angle * RADIANS_TO_ANGLE / ANGULAR_BIN_SIZE ) + 0.5) % ANGULAR_BINS; angle = angle_bin * ANGULAR_BIN_SIZE * ANGLE_TO_RADIANS; t = y_midpath * cosf(angle) - x_midpath * sinf(angle); t_bin = int( (t / T_BIN_SIZE ) + T_BINS/2); v = z_midpath; v_bin = int( (v / V_BIN_SIZE ) + V_BINS/2); if( (t_bin >= 0) && (v_bin >= 0) && (t_bin < T_BINS) && (v_bin < V_BINS) ) { bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS; atomicAdd( &bin_counts[bin_num[i]], 1 ); atomicAdd( &mean_WEPL[bin_num[i]], WEPL[i] ); atomicAdd( &mean_rel_ut_angle[bin_num[i]], relative_ut_angle[i] ); atomicAdd( &mean_rel_uv_angle[bin_num[i]], relative_uv_angle[i] ); } else bin_num[i] = -1; } } } /************************************************************************************************************************************************************/ /*************************************************************** Statistical Analysis and Cuts **************************************************************/ /************************************************************************************************************************************************************/ void calculate_means() { dim3 dimBlock( T_BINS ); dim3 dimGrid( V_BINS, ANGULAR_BINS ); calculate_means_kernel<<< dimGrid, dimBlock >>> ( bin_counts_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d ); //cudaMemcpy( bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, cudaMemcpyDeviceToHost ); //cudaMemcpy( mean_WEPL_h, mean_WEPL_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost ); //cudaMemcpy( mean_rel_ut_angle_h, mean_rel_ut_angle_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost ); //cudaMemcpy( mean_rel_uv_angle_h, mean_rel_uv_angle_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost ); //write_integer_array_to_file("bin_counts_h_pre", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS ); //write_float_array_to_file("mean_WEPL_h", output_directory, output_folder, mean_WEPL_h, T_BINS, ANGULAR_BINS, V_BINS ); //write_float_array_to_file("mean_rel_ut_angle_h", output_directory, output_folder, mean_rel_ut_angle_h, T_BINS, ANGULAR_BINS, V_BINS ); //write_float_array_to_file("mean_rel_uv_angle_h", output_directory, output_folder, mean_rel_uv_angle_h, T_BINS, ANGULAR_BINS, V_BINS ); free(bin_counts_h); free(mean_WEPL_h); free(mean_rel_ut_angle_h); free(mean_rel_uv_angle_h); } __global__ void calculate_means_kernel( int* bin_counts, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle ) { int v = blockIdx.x; int angle = blockIdx.y; int t = threadIdx.x; int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS; if( bin_counts[bin] > 0 ) { mean_WEPL[bin] /= bin_counts[bin]; mean_rel_ut_angle[bin] /= bin_counts[bin]; mean_rel_uv_angle[bin] /= bin_counts[bin]; } } void sum_differences( int start_position, int num_histories ) { unsigned int mem_size_hist_floats = sizeof(float) * num_histories; unsigned int mem_size_hist_ints = sizeof(int) * num_histories; cudaMalloc((void**) &bin_num_d, mem_size_hist_ints); cudaMalloc((void**) &WEPL_d, mem_size_hist_floats); cudaMalloc((void**) &xy_entry_angle_d, mem_size_hist_floats); cudaMalloc((void**) &xz_entry_angle_d, mem_size_hist_floats); cudaMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats); cudaMalloc((void**) 
&xz_exit_angle_d, mem_size_hist_floats); //cudaMalloc((void**) &xy_exit_angle_d, mem_size_hist_floats); //cudaMalloc((void**) &xz_exit_angle_d, mem_size_hist_floats); cudaMalloc((void**) &relative_ut_angle_d, mem_size_hist_floats); cudaMalloc((void**) &relative_uv_angle_d, mem_size_hist_floats); cudaMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, cudaMemcpyHostToDevice); cudaMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); cudaMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); cudaMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); //cudaMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); //cudaMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); cudaMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); cudaMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice); dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); sum_differences_kernel<<<dimGrid, dimBlock>>> ( num_histories, bin_num_d, mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d, relative_ut_angle_d, relative_uv_angle_d ); cudaFree( bin_num_d ); cudaFree( WEPL_d ); cudaFree( xy_entry_angle_d ); cudaFree( xz_entry_angle_d ); //cudaFree( xy_exit_angle_d ); //cudaFree( xz_exit_angle_d ); cudaFree( relative_ut_angle_d ); cudaFree( relative_uv_angle_d ); } __global__ void sum_differences_kernel ( int num_histories, int* bin_num, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle, float* relative_ut_angle, float* relative_uv_angle ) { float WEPL_difference, rel_ut_angle_difference, rel_uv_angle_difference; int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( i < num_histories ) { /* float ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; if( fabs(ut_diff) > PI ) { printf("Hello\n"); if( xy_entry_angle[i] > PI ) xy_entry_angle[i] -= TWO_PI; if( xy_exit_angle[i] > PI ) xy_exit_angle[i] -= TWO_PI; ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; } float uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; if( fabs(uv_diff) > PI ) { if( xz_entry_angle[i] > PI ) xz_entry_angle[i] -= TWO_PI; if( xz_exit_angle[i] > PI ) xz_exit_angle[i] -= TWO_PI; uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; }*/ WEPL_difference = WEPL[i] - mean_WEPL[bin_num[i]]; rel_ut_angle_difference = relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]]; rel_uv_angle_difference = relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]]; //rel_ut_angle_difference = ut_diff - mean_rel_ut_angle[bin_num[i]]; //rel_uv_angle_difference = uv_diff - mean_rel_uv_angle[bin_num[i]]; atomicAdd( &stddev_WEPL[bin_num[i]], WEPL_difference * WEPL_difference); atomicAdd( &stddev_rel_ut_angle[bin_num[i]], rel_ut_angle_difference * rel_ut_angle_difference ); atomicAdd( &stddev_rel_uv_angle[bin_num[i]], rel_uv_angle_difference * rel_uv_angle_difference ); } } void 
calculate_std_devs()
{
	dim3 dimBlock( T_BINS );
	dim3 dimGrid( V_BINS, ANGULAR_BINS );
	calculate_std_devs_kernel<<< dimGrid, dimBlock >>>( bin_counts_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d );
	//cudaFree( bin_counts_d );
}
__global__ void calculate_std_devs_kernel( int* bin_counts, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle )
{
	int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
	int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
	if( bin_counts[bin] > 0 )
	{
		// SAMPLE_STD_DEV = true/false = 1/0 => std_dev = SUM{i = 1 -> N} [ ( mu - x_i)^2 / ( N - 1/0 ) ]
		stddev_WEPL[bin] = sqrtf( stddev_WEPL[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
		stddev_rel_ut_angle[bin] = sqrtf( stddev_rel_ut_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
		stddev_rel_uv_angle[bin] = sqrtf( stddev_rel_uv_angle[bin] / ( bin_counts[bin] - SAMPLE_STD_DEV ) );
	}
	__syncthreads();
	bin_counts[bin] = 0;
}
void statistical_cuts( int start_position, int num_histories )
{
	unsigned int mem_size_hist_floats = sizeof(float) * num_histories;
	unsigned int mem_size_hist_ints = sizeof(int) * num_histories;
	unsigned int mem_size_hist_bools = sizeof(bool) * num_histories;
	passed_cuts_h = (bool*) calloc( num_histories, sizeof(bool) );
	cudaMalloc( (void**) &bin_num_d, mem_size_hist_ints );
	cudaMalloc( (void**) &WEPL_d, mem_size_hist_floats );
	cudaMalloc( (void**) &xy_entry_angle_d, mem_size_hist_floats );
	cudaMalloc( (void**) &xz_entry_angle_d, mem_size_hist_floats );
	//cudaMalloc( (void**) &xy_exit_angle_d, mem_size_hist_floats );
	//cudaMalloc( (void**) &xz_exit_angle_d, mem_size_hist_floats );
	cudaMalloc( (void**) &relative_ut_angle_d, mem_size_hist_floats );
	cudaMalloc( (void**) &relative_uv_angle_d, mem_size_hist_floats );
	cudaMalloc( (void**) &passed_cuts_d, mem_size_hist_bools );
	cudaMemcpy( bin_num_d, &bin_num_vector[start_position], mem_size_hist_ints, cudaMemcpyHostToDevice );
	cudaMemcpy( WEPL_d, &WEPL_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	cudaMemcpy( xy_entry_angle_d, &xy_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	cudaMemcpy( xz_entry_angle_d, &xz_entry_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	//cudaMemcpy( xy_exit_angle_d, &xy_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	//cudaMemcpy( xz_exit_angle_d, &xz_exit_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	cudaMemcpy( relative_ut_angle_d, &relative_ut_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	cudaMemcpy( relative_uv_angle_d, &relative_uv_angle_vector[start_position], mem_size_hist_floats, cudaMemcpyHostToDevice );
	cudaMemcpy( passed_cuts_d, passed_cuts_h, mem_size_hist_bools, cudaMemcpyHostToDevice );
	//puts("Before kernel");
	dim3 dimBlock(THREADS_PER_BLOCK);
	dim3 dimGrid( int( num_histories / THREADS_PER_BLOCK ) + 1 );
	statistical_cuts_kernel<<< dimGrid, dimBlock >>>
	( num_histories, bin_counts_d, bin_num_d, sinogram_d, WEPL_d, xy_entry_angle_d, xz_entry_angle_d, xy_entry_angle_d, xz_entry_angle_d,//xy_exit_angle_d, xz_exit_angle_d,
	mean_WEPL_d, mean_rel_ut_angle_d, mean_rel_uv_angle_d, stddev_WEPL_d, stddev_rel_ut_angle_d, stddev_rel_uv_angle_d, passed_cuts_d, relative_ut_angle_d, relative_uv_angle_d );
	//puts("After kernel");
	cudaMemcpy( passed_cuts_h, passed_cuts_d, mem_size_hist_bools, cudaMemcpyDeviceToHost );
	//printf("start iteration %d\n", iteration );
	for( int i = 0; i < num_histories;
i++ ) { if( passed_cuts_h[i] ) { //printf("start i = %d\n", i ); //printf("index = %d\n", start_position + i ); bin_num_vector[post_cut_histories] = bin_num_vector[start_position + i]; //gantry_angle_vector[post_cut_histories] = gantry_angle_vector[start_position + i]; WEPL_vector[post_cut_histories] = WEPL_vector[start_position + i]; x_entry_vector[post_cut_histories] = x_entry_vector[start_position + i]; y_entry_vector[post_cut_histories] = y_entry_vector[start_position + i]; z_entry_vector[post_cut_histories] = z_entry_vector[start_position + i]; x_exit_vector[post_cut_histories] = x_exit_vector[start_position + i]; y_exit_vector[post_cut_histories] = y_exit_vector[start_position + i]; z_exit_vector[post_cut_histories] = z_exit_vector[start_position + i]; xy_entry_angle_vector[post_cut_histories] = xy_entry_angle_vector[start_position + i]; xz_entry_angle_vector[post_cut_histories] = xz_entry_angle_vector[start_position + i]; //xy_exit_angle_vector[post_cut_histories] = xy_exit_angle_vector[start_position + i]; //xz_exit_angle_vector[post_cut_histories] = xz_exit_angle_vector[start_position + i]; relative_ut_angle_vector[post_cut_histories] = relative_ut_angle_vector[start_position + i]; relative_uv_angle_vector[post_cut_histories] = relative_uv_angle_vector[start_position + i]; post_cut_histories++; } } //printf("end iteration %d\n", iteration ); } __global__ void statistical_cuts_kernel ( int num_histories, int* bin_counts, int* bin_num, float* sinogram, float* WEPL, float* xy_entry_angle, float* xz_entry_angle, float* xy_exit_angle, float* xz_exit_angle, float* mean_WEPL, float* mean_rel_ut_angle, float* mean_rel_uv_angle, float* stddev_WEPL, float* stddev_rel_ut_angle, float* stddev_rel_uv_angle, bool* passed_cuts, float* relative_ut_angle, float* relative_uv_angle ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( i < num_histories ) { /*float ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; if( ut_diff > PI ) { if( xy_entry_angle[i] > PI ) xy_entry_angle[i] -= TWO_PI; if( xy_exit_angle[i] > PI ) xy_exit_angle[i] -= TWO_PI; ut_diff = xy_exit_angle[i] - xy_entry_angle[i]; } float uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; if( uv_diff > PI ) { if( xz_entry_angle[i] > PI ) xz_entry_angle[i] -= TWO_PI; if( xz_exit_angle[i] > PI ) xz_exit_angle[i] -= TWO_PI; uv_diff = xz_exit_angle[i] - xz_entry_angle[i]; }*/ bool passed_ut_cut = ( fabs( relative_ut_angle[i] - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) ); bool passed_uv_cut = ( fabs( relative_uv_angle[i] - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) ); /*bool passed_ut_cut = ( fabs( ut_diff - mean_rel_ut_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_ut_angle[bin_num[i]] ) ); bool passed_uv_cut = ( fabs( uv_diff - mean_rel_uv_angle[bin_num[i]] ) < ( SIGMAS_TO_KEEP * stddev_rel_uv_angle[bin_num[i]] ) );*/ bool passed_WEPL_cut = ( fabs( mean_WEPL[bin_num[i]] - WEPL[i] ) <= ( SIGMAS_TO_KEEP * stddev_WEPL[bin_num[i]] ) ); passed_cuts[i] = passed_ut_cut && passed_uv_cut && passed_WEPL_cut; if( passed_cuts[i] ) { atomicAdd( &sinogram[bin_num[i]], WEPL[i] ); atomicAdd( &bin_counts[bin_num[i]], 1 ); } } } /************************************************************************************************************************************************************/ /*********************************************************************** MLP ********************************************************************************/ 
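// Illustrative sketch only (not called anywhere in this file): the ut/xy rotations that the kernels above inline by hand,
// collected into standalone helpers. The names ut_to_xy_example/xy_to_ut_example and the pointer-output interface are
// assumptions made for illustration, not part of the existing reconstruction code.
__device__ inline void ut_to_xy_example( float u, float t, float gantry_angle_radians, float* x, float* y )
{
	// Rotate the detector (ut) coordinate system into the object (xy) coordinate system:
	// x = cos(phi) * u - sin(phi) * t, y = sin(phi) * u + cos(phi) * t
	*x = ( cosf( gantry_angle_radians ) * u ) - ( sinf( gantry_angle_radians ) * t );
	*y = ( sinf( gantry_angle_radians ) * u ) + ( cosf( gantry_angle_radians ) * t );
}
__device__ inline void xy_to_ut_example( float x, float y, float gantry_angle_radians, float* u, float* t )
{
	// Inverse rotation, used when projecting object coordinates back onto the detector axes:
	// u = cos(phi) * x + sin(phi) * y, t = cos(phi) * y - sin(phi) * x
	*u = ( cosf( gantry_angle_radians ) * x ) + ( sinf( gantry_angle_radians ) * y );
	*t = ( cosf( gantry_angle_radians ) * y ) - ( sinf( gantry_angle_radians ) * x );
}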
/************************************************************************************************************************************************************/ void create_MLP_test_image() { double x, y; //Create space carve object, init to zeros MLP_test_image_h = (int*)calloc( MLP_IMAGE_VOXELS, sizeof(int)); for( int slice = 0; slice < MLP_IMAGE_SLICES; slice++ ) { for( int row = 0; row < MLP_IMAGE_ROWS; row++ ) { for( int column = 0; column < MLP_IMAGE_COLUMNS; column++ ) { x = ( column - MLP_IMAGE_COLUMNS/2 + 0.5) * MLP_IMAGE_VOXEL_WIDTH; y = ( MLP_IMAGE_ROWS/2 - row - 0.5 ) * MLP_IMAGE_VOXEL_HEIGHT; if( pow( x, 2 ) + pow( y, 2 ) <= pow( double(MLP_IMAGE_RECON_CYL_RADIUS), 2) ) MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 1; if( pow( x / MLP_PHANTOM_A, 2 ) + pow( y / MLP_PHANTOM_B, 2 ) <= 1 ) MLP_test_image_h[slice * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS + row * MLP_IMAGE_COLUMNS + column] = 8; } } } } void MLP_test() { char user_response[20]; float x_entry = -3.0; float y_entry = -sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_entry,2) ); float z_entry = 0.0; float x_exit = 2.5; float y_exit = sqrtf( pow(MLP_IMAGE_RECON_CYL_RADIUS, 2) - pow(x_exit,2) ); float z_exit = 0.0; float xy_entry_angle = 25 * PI/180, xz_entry_angle = 0.0; float xy_exit_angle = 45* PI/180, xz_exit_angle = 0.0; float x_in_object, y_in_object, z_in_object; float u_in_object, t_in_object, v_in_object; float x_out_object, y_out_object, z_out_object; float u_out_object, t_out_object, v_out_object; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ float voxel_x, voxel_y, voxel_z; int voxel; int x_move_direction, y_move_direction, z_move_direction; int x_voxel_step, y_voxel_step, z_voxel_step; float x, y, z; float x_inside, y_inside, z_inside; float x_to_go, y_to_go, z_to_go; float delta_x, delta_y, delta_z; float x_extension, y_extension; float x_move, y_move, z_move; bool end_walk, outside_image; bool entered_object = false, exited_object = false; /********************************************************************************************************/ /******************** Determine if and Where the Proton Enters the Actual Object ************************/ /********************************************************************************************************/ /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( x_entry + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_entry ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_entry ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; //printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z); //printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); //printf("voxel = %d \n", voxel ); 
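	// The flattened voxel index used throughout this test follows voxel = voxel_x + voxel_y * MLP_IMAGE_COLUMNS
	// + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS. For a hypothetical 4 column x 3 row image, the voxel at
	// (voxel_x, voxel_y, voxel_z) = (2, 1, 0) maps to index 2 + 1*4 + 0*12 = 6, i.e. the third voxel of the
	// second row of the first slice.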
/********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = MLP_IMAGE_VOXEL_WIDTH; delta_y = tanf( xy_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH; delta_z = tanf( xz_entry_angle ) * MLP_IMAGE_VOXEL_WIDTH; if( x_entry == x_exit ) { delta_x = 0; delta_y = MLP_IMAGE_VOXEL_HEIGHT; delta_z = tanf(xz_entry_angle) / tanf(xy_entry_angle) * MLP_IMAGE_VOXEL_HEIGHT; if( y_entry == y_exit ) { delta_x = 0; delta_y = 0; delta_z = MLP_IMAGE_VOXEL_THICKNESS; } } //printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z ); x_move = 0, y_move = 0, z_move = 0; /*x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit ); y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit ); z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit );*/ x_move_direction = ( cosf(xy_entry_angle) >= 0 ) - ( cosf(xy_entry_angle) < 0 ); y_move_direction = ( sinf(xy_entry_angle) >= 0 ) - ( sinf(xy_entry_angle) < 0 ); z_move_direction = ( sinf(xy_entry_angle) >= 0 ) - ( sinf(xy_entry_angle) < 0 ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_entry, y = y_entry, z = z_entry; x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside; //printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { entered_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } end_walk = entered_object || outside_image; ///********************************************************************************************/ ///*********************************** Voxel Walk Routine *************************************/ ///********************************************************************************************/ if( z_entry != z_exit ) { while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = MLP_IMAGE_VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = MLP_IMAGE_VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = 
delta_z / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { entered_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; end_walk = entered_object || outside_image; } } else { //printf("z_exit == z_entry\n"); while( !end_walk ) { //printf("beginning of loop\n\n"); //printf("x = %3f y = %3f z = %3f\n", x, y, z ); //printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); //printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z); // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //printf("y_extension = %3f\n", y_extension); //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); //printf("end of loop\n\n"); //printf("x_move = %3f y_move = %3f\n", x_move, y_move ); //printf("x = %3f y = %3f z = %3f\n", x, y, z ); //printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); //printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { entered_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } //printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f", MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 ); x += x_move_direction * x_move; y += y_move_direction * y_move; end_walk = entered_object || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_entry != z_exit => z_entry == z_exit if( entered_object ) { x_in_object = x; y_in_object = y; z_in_object = z; } /********************************************************************************************************/ /******************** Determine if and Where the Proton Exited the Actual Object ************************/ /********************************************************************************************************/ /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial 
Distance Into Voxel x_inside = modf( ( x_exit + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_exit ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_exit ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; //printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z); //printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); //printf("voxel = %d \n", voxel ); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = MLP_IMAGE_VOXEL_WIDTH; delta_y = tanf( xy_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH; delta_z = tanf( xz_exit_angle ) * MLP_IMAGE_VOXEL_WIDTH; if( x_entry == x_exit ) { delta_x = 0; delta_y = MLP_IMAGE_VOXEL_HEIGHT; delta_z = tanf(xz_exit_angle) / tanf(xy_exit_angle) * MLP_IMAGE_VOXEL_HEIGHT; if( y_entry == y_exit ) { delta_x = 0; delta_y = 0; delta_z = MLP_IMAGE_VOXEL_THICKNESS; } } //printf("delta_x = %3f delta_y = %3f delta_z = %3f\n", delta_x, delta_y, delta_z ); x_move = 0, y_move = 0, z_move = 0; //x_move_direction = ( x_exit <= x_entry ) - ( x_exit > x_entry ); //y_move_direction = ( y_exit <= y_entry ) - ( y_exit > y_entry ); //z_move_direction = ( z_exit <= z_entry ) - ( z_exit > z_entry ); x_move_direction = ( cosf(xy_exit_angle) < 0 ) - ( cosf(xy_exit_angle) >= 0 ); y_move_direction = ( sinf(xy_exit_angle) < 0 ) - ( sinf(xy_exit_angle) >= 0 ); z_move_direction = ( sinf(xy_exit_angle) < 0 ) - ( sinf(xy_exit_angle) >= 0 ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_exit, y = y_exit, z = z_exit; x_to_go = ( x_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_WIDTH - x_inside ) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_HEIGHT - y_inside ) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * ( MLP_IMAGE_VOXEL_THICKNESS - z_inside ) + ( z_voxel_step <= 0 ) * z_inside; //printf("initial values:\n\tx_to_go = %3f\n\ty_to_go = %3f\n\tz_to_go = %3f\n", x_to_go, y_to_go, z_to_go); outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { exited_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } end_walk = exited_object || outside_image; ///********************************************************************************************/ ///*********************************** Voxel Walk Routine *************************************/ ///********************************************************************************************/ if( z_entry != z_exit ) { //printf("z_entry != z_exit\n"); while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = 
delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = MLP_IMAGE_VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = MLP_IMAGE_VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = delta_z / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } voxel = int( voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS ); outside_image = ( voxel_x >= MLP_IMAGE_COLUMNS ) || ( voxel_y >= MLP_IMAGE_ROWS ) || ( voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { exited_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; end_walk = exited_object || outside_image; } } else { //printf("z_entry == z_exit\n"); while( !end_walk ) { //printf("beginning of loop\n\n"); //printf("x = %3f y = %3f z = %3f\n", x, y, z ); //printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); //printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n", voxel_x, voxel_y, voxel_z); // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //printf("y_extension = %3f\n", y_extension); //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = MLP_IMAGE_VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = MLP_IMAGE_VOXEL_HEIGHT; voxel_y += y_voxel_step; } voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); /*printf("end of loop\n\n"); printf("x_move = %3f y_move = %3f\n", x_move, y_move ); printf("x = %3f y = %3f z = %3f\n", x, y, z ); printf("x_to_go = %3f y_to_go = %3f\n", x_to_go, y_to_go); printf("voxel_x = %3f voxel_y = %3f voxel_z = %3f\n\n", voxel_x, voxel_y, voxel_z);*/ outside_image = (voxel_x >= MLP_IMAGE_COLUMNS ) || (voxel_y >= MLP_IMAGE_ROWS ) || (voxel_z >= MLP_IMAGE_SLICES ); if( !outside_image ) { exited_object = MLP_test_image_h[voxel] == 8; MLP_test_image_h[voxel] = 4; } //printf("MLP_IMAGE_WIDTH/2 = %3f\n MLP_IMAGE_HEIGHT/2 = %3f",MLP_IMAGE_WIDTH/2 , MLP_IMAGE_HEIGHT/2 ); x += x_move_direction * x_move; y += y_move_direction * y_move; end_walk = exited_object || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_exit != z_exit => z_exit == z_exit if( 
exited_object ) { x_out_object = x; y_out_object = y; z_out_object = z; } x_inside = modf( ( x_in_object + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_in_object ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_in_object ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; //printf("voxel_x = %3f \nvoxel_y = %3f \nvoxel_z = %3f\n", voxel_x, voxel_y, voxel_z); //printf("x_inside = %3f y_inside = %3f z_inside = %3f\n", x_inside, y_inside, z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); int path[1000]; int path_index = 0; double chord_lengths[1000]; MLP_test_image_h[voxel] = 0; path[path_index++] = voxel; u_in_object = ( cosf( xy_entry_angle ) * x_in_object ) + ( sinf( xy_entry_angle ) * y_in_object ); u_out_object = ( cosf( xy_entry_angle ) * x_out_object ) + ( sinf( xy_entry_angle ) * y_out_object ); t_in_object = ( cosf( xy_entry_angle ) * y_in_object ) - ( sinf( xy_entry_angle ) * x_in_object ); t_out_object = ( cosf( xy_entry_angle ) * y_out_object ) - ( sinf( xy_entry_angle ) * x_out_object ); v_in_object = z_in_object; v_out_object = z_out_object; double T_0[2] = { t_in_object, 0 }; double T_2[2] = { t_out_object, xy_exit_angle - xy_entry_angle }; double V_0[2] = { v_in_object, xz_entry_angle }; double V_2[2] = { v_out_object, xz_exit_angle }; double u_2 = abs(u_out_object - u_in_object); double u_0 = 0, u_1 = MLP_u_step; double t_1_previous, v_1_previous; double x_1_previous = x, y_1_previous = y, z_1_previous = z; int voxel_x_previous = voxel_x; int voxel_y_previous = voxel_y; int voxel_z_previous = voxel_z; int voxel_previous = voxel; int voxels_passed; double chord_segment; double chord_fraction; double x_to_edge, y_to_edge, z_to_edge; //fgets(user_response, sizeof(user_response), stdin); while( u_1 <= u_2 - MLP_u_step ) { double R_0[4] = { 1.0, u_1 - u_0, 0.0 , 1.0}; //a,b,c,d double R_0T[4] = { 1.0, 0.0, u_1 - u_0 , 1.0}; //a,c,b,d double R_1[4] = { 1.0, u_2 - u_1, 0.0 , 1.0}; //a,b,c,d double R_1T[4] = { 1.0, 0.0, u_2 - u_1 , 1.0}; //a,c,b,d double sigma_1_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_1 - u_0)/X_0) ), 2.0 ) / X_0; float sigma_t1 = (A_0/3)*pow(u_1, 3.0) + (A_1/12)*pow(u_1, 4.0) + (A_2/30)*pow(u_1, 5.0) + (A_3/60)*pow(u_1, 6.0) + (A_4/105)*pow(u_1, 7.0) + (A_5/168)*pow(u_1, 8.0); float sigma_t1_theta1 = pow(u_1, 2.0 )*( (A_0/2) + (A_1/6)*u_1 + (A_2/12)*pow(u_1, 2.0) + (A_3/20)*pow(u_1, 3.0) + (A_4/30)*pow(u_1, 4.0) + (A_5/42)*pow(u_1, 5.0) ); float sigma_theta1 = A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0); double determinant_Sigma_1 = sigma_t1 * sigma_theta1 - pow( sigma_t1_theta1, 2 );//ad-bc double Sigma_1I[4] = // Sigma_1 Inverse = [1/det(Sigma_1)]*{ d, -b, -c, a } { sigma_theta1 / determinant_Sigma_1, -sigma_t1_theta1 / determinant_Sigma_1, -sigma_t1_theta1 / determinant_Sigma_1, sigma_t1 / determinant_Sigma_1 }; double sigma_2_coefficient = pow( E_0 * ( 1 + 0.038 * log( (u_2 - u_1)/X_0 ) ), 2.0 ) / X_0; double sigma_t2 = (A_0/3)*pow(u_2, 3.0) + (A_1/12)*pow(u_2, 4.0) + (A_2/30)*pow(u_2, 5.0) + (A_3/60)*pow(u_2, 6.0) + (A_4/105)*pow(u_2, 7.0) + (A_5/168)*pow(u_2, 8.0) - (A_0/3)*pow(u_1, 3.0) - (A_1/4)*pow(u_1, 4.0) - (A_2/5)*pow(u_1, 5.0) - (A_3/6)*pow(u_1, 6.0) - (A_4/7)*pow(u_1, 7.0) - (A_5/8)*pow(u_1, 8.0) + 2*u_2*( (A_0/2)*pow(u_1, 2.0) + (A_1/3)*pow(u_1, 
3.0) + (A_2/4)*pow(u_1, 4.0) + (A_3/5)*pow(u_1, 5.0) + (A_4/6)*pow(u_1, 6.0) + (A_5/7)*pow(u_1, 7.0) ) - pow(u_2, 2.0) * ( A_0*u_1 + (A_1/2)*pow(u_1, 2.0) + (A_2/3)*pow(u_1, 3.0) + (A_3/4)*pow(u_1, 4.0) + (A_4/5)*pow(u_1, 5.0) + (A_5/6)*pow(u_1, 6.0) ); double sigma_t2_theta2 = pow(u_2, 2.0 )*( (A_0/2) + (A_1/6)*u_2 + (A_2/12)*pow(u_2, 2.0) + (A_3/20)*pow(u_2, 3.0) + (A_4/30)*pow(u_2, 4.0) + (A_5/42)*pow(u_2, 5.0) ) - u_2*u_1*( A_0 + (A_1/2)*u_1 + (A_2/3)*pow(u_1, 2.0) + (A_3/4)*pow(u_1, 3.0) + (A_4/5)*pow(u_1, 4.0) + (A_5/6)*pow(u_1, 5.0) ) + pow(u_1, 2.0 )*( (A_0/2) + (A_1/3)*u_1 + (A_2/4)*pow(u_1, 2.0) + (A_3/5)*pow(u_1, 3.0) + (A_4/6)*pow(u_1, 4.0) + (A_5/7)*pow(u_1, 5.0) ); double sigma_theta2 = A_0 * ( u_2 - u_1 ) + ( A_1 / 2 ) * ( pow(u_2, 2.0) - pow(u_1, 2.0) ) + ( A_2 / 3 ) * ( pow(u_2, 3.0) - pow(u_1, 3.0) ) + ( A_3 / 4 ) * ( pow(u_2, 4.0) - pow(u_1, 4.0) ) + ( A_4 / 5 ) * ( pow(u_2, 5.0) - pow(u_1, 5.0) ) + ( A_5 /6 )*( pow(u_2, 6.0) - pow(u_1, 6.0) ); double determinant_Sigma_2 = sigma_t2 * sigma_theta2 - pow( sigma_t2_theta2, 2 );//ad-bc double Sigma_2I[4] = // Sigma_2 Inverse = [1/det(Sigma_2)]*{ d, -b, -c, a } { sigma_theta2 / determinant_Sigma_2, -sigma_t2_theta2 / determinant_Sigma_2, -sigma_t2_theta2 / determinant_Sigma_2, sigma_t2 / determinant_Sigma_2 }; double first_term[4] = { Sigma_1I[0] + R_1T[0] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[1] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ), Sigma_1I[1] + R_1T[0] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[1] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] ), Sigma_1I[2] + R_1T[2] * ( Sigma_2I[0] * R_1[0] + Sigma_2I[1] * R_1[2] ) + R_1T[3] * ( Sigma_2I[2] * R_1[0] + Sigma_2I[3] * R_1[2] ), Sigma_1I[3] + R_1T[2] * ( Sigma_2I[0] * R_1[1] + Sigma_2I[1] * R_1[3] ) + R_1T[3] * ( Sigma_2I[2] * R_1[1] + Sigma_2I[3] * R_1[3] ) }; double determinant_first_term = first_term[0] * first_term[3] - first_term[1] * first_term[2]; first_term[0] = first_term[3] / determinant_first_term; first_term[1] = -first_term[1] / determinant_first_term; first_term[2] = -first_term[2] / determinant_first_term; first_term[3] = first_term[0] / determinant_first_term; double second_term[2] = { Sigma_1I[0] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] ) + Sigma_1I[1] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] ) + R_1T[0] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] ) + R_1T[1] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] ) , Sigma_1I[2] * ( R_0[0] * T_0[0] + R_0[1] * T_0[1] ) + Sigma_1I[3] * ( R_0[2] * T_0[0] + R_0[3] * T_0[1] ) + R_1T[2] * ( Sigma_2I[0] * T_2[0] + Sigma_2I[1] * T_2[1] ) + R_1T[3] * ( Sigma_2I[2] * T_2[0] + Sigma_2I[3] * T_2[1] ) }; double t_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1]; double theta_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1]; // Do v MLP Now second_term[0] = Sigma_1I[0] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] ) + Sigma_1I[1] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] ) + R_1T[0] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] ) + R_1T[1] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] ); second_term[1] = Sigma_1I[2] * ( R_0[0] * V_0[0] + R_0[1] * V_0[1] ) + Sigma_1I[3] * ( R_0[2] * V_0[0] + R_0[3] * V_0[1] ) + R_1T[2] * ( Sigma_2I[0] * V_2[0] + Sigma_2I[1] * V_2[1] ) + R_1T[3] * ( Sigma_2I[2] * V_2[0] + Sigma_2I[3] * V_2[1] ); double v_1 = first_term[0] * second_term[0] + first_term[1] * second_term[1]; double phi_1 = first_term[2] * second_term[0] + first_term[3] * second_term[1]; // Rotate Coordinate From utv to xyz Coordinate System and Determine Which 
Voxel this Point on the MLP Path is in double x_1 = ( cosf( xy_entry_angle ) * (u_in_object + u_1) ) - ( sinf( xy_entry_angle ) * t_1 ); double y_1 = ( sinf( xy_entry_angle ) * (u_in_object + u_1) ) + ( cosf( xy_entry_angle ) * t_1 ); double z_1 = v_in_object + v_1; x_inside = modf( ( x_1 + MLP_IMAGE_WIDTH/2 ) / MLP_IMAGE_VOXEL_WIDTH, &voxel_x ) * MLP_IMAGE_VOXEL_WIDTH; y_inside = modf( ( MLP_IMAGE_HEIGHT/2 - y_1 ) / MLP_IMAGE_VOXEL_HEIGHT, &voxel_y ) * MLP_IMAGE_VOXEL_HEIGHT; z_inside = modf( ( MLP_IMAGE_THICKNESS/2 - z_1 ) / MLP_IMAGE_VOXEL_THICKNESS, &voxel_z ) * MLP_IMAGE_VOXEL_THICKNESS; x_voxel_step = (voxel_x >= voxel_x_previous ) - (voxel_x <= voxel_x_previous ); y_voxel_step = (voxel_y >= voxel_y_previous ) - (voxel_y <= voxel_y_previous ); z_voxel_step = (voxel_z >= voxel_z_previous ) - (voxel_z <= voxel_z_previous ); x_to_edge = (x_voxel_step < 0) * x_inside + (x_voxel_step > 0) * (VOXEL_WIDTH - x_inside); y_to_edge = (y_voxel_step < 0) * y_inside + (y_voxel_step > 0) * (VOXEL_HEIGHT - y_inside); z_to_edge = (z_voxel_step < 0) * z_inside + (z_voxel_step > 0) * (VOXEL_THICKNESS - z_inside); voxel = int(voxel_x + voxel_y * MLP_IMAGE_COLUMNS + voxel_z * MLP_IMAGE_COLUMNS * MLP_IMAGE_ROWS); if( voxel != path[path_index - 1] ) path[path_index++] = voxel; for( int i = 0; i < path_index; i++ ) printf( "path[i] = %d\n", path[i] ); printf( "path_index = %d\n\n", path_index ); fgets(user_response, sizeof(user_response), stdin); MLP_test_image_h[voxel] = 0; voxels_passed = (voxel_x - voxel_x_previous) + (voxel_y - voxel_y_previous) + (voxel_z - voxel_z_previous); chord_segment = sqrt( pow( x_1_previous - x_1, 2 ) + pow( y_1_previous - y_1, 2 ) + pow( z_1_previous - z_1, 2 ) ); if( voxels_passed == 0 ) { chord_lengths[path_index - 1] += chord_segment; } else if( voxels_passed == 1 ) { if( x_voxel_step != 0 ) { chord_fraction = x_to_edge / (x_1_previous - x_1); } else if( y_voxel_step != 0 ) { chord_fraction = y_to_edge / (y_1_previous - y_1); } else { chord_fraction = z_to_edge / (z_1_previous - z_1); } chord_lengths[path_index - 1] += chord_fraction * chord_segment; chord_lengths[path_index] += chord_segment - chord_lengths[path_index - 1]; } else if( voxels_passed == 2 ) { } else if( voxels_passed == 3 ) { } u_1 += MLP_u_step; t_1_previous = t_1; v_1_previous = v_1; x_1_previous = x_1; y_1_previous = y_1; z_1_previous = z_1; voxel_x_previous = voxel_x; voxel_y_previous = voxel_y; voxel_z_previous = voxel_z; voxel_previous = voxel; } } /************************************************************************************************************************************************************/ /************************************************************************ FBP *******************************************************************************/ /************************************************************************************************************************************************************/ void initialize_sinogram() { sinogram_h = (float*) calloc( NUM_BINS, sizeof(float) ); cudaMalloc((void**) &sinogram_d, MEM_SIZE_BINS_FLOATS ); cudaMemcpy( sinogram_d, sinogram_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice ); } void construct_sinogram() { dim3 dimBlock( T_BINS ); dim3 dimGrid( V_BINS, ANGULAR_BINS ); construct_sinogram_kernel<<< dimGrid, dimBlock >>>( bin_counts_d, sinogram_d ); //cudaMemcpy(sinogram_h, sinogram_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost); //write_float_array_to_files("sinogram", output_directory, output_folder, sinogram_h, COLUMNS, ROWS, 3 ); 
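	// At this point sinogram_d holds, per bin, the sum of the WEPL values that passed the statistical cuts (accumulated
	// with atomicAdd in statistical_cuts_kernel) and bin_counts_d holds the number of contributing histories;
	// construct_sinogram_kernel divides the two so each bin becomes a mean WEPL. For example, a bin that accumulated
	// 45.0 from 9 histories holds 5.0 after this call, which is what the filtering and backprojection steps consume.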
	//bin_counts_h = (int*) calloc( NUM_BINS, sizeof(int) );
	//cudaMemcpy(bin_counts_h, bin_counts_d, MEM_SIZE_BINS_INTS, cudaMemcpyDeviceToHost);
	//write_integer_array_to_file( "bin_counts_post", output_directory, output_folder, bin_counts_h, T_BINS, ANGULAR_BINS, V_BINS );
}
__global__ void construct_sinogram_kernel( int* bin_counts, float* sinogram )
{
	int v = blockIdx.x, angle = blockIdx.y, t = threadIdx.x;
	int bin = t + angle * T_BINS + v * T_BINS * ANGULAR_BINS;
	if( bin_counts[bin] > 0 )
		sinogram[bin] /= bin_counts[bin];
}
void filter()
{
	puts("Doing the filtering...");
	sinogram_filtered_h = (float*) calloc( NUM_BINS, sizeof(float) );
	cudaMalloc((void**) &sinogram_filtered_d, MEM_SIZE_BINS_FLOATS);
	cudaMemcpy( sinogram_filtered_d, sinogram_filtered_h, MEM_SIZE_BINS_FLOATS, cudaMemcpyHostToDevice);
	dim3 dimBlock( T_BINS );
	dim3 dimGrid( V_BINS, ANGULAR_BINS );
	filter_kernel<<< dimGrid, dimBlock >>>( sinogram_d, sinogram_filtered_d );
	cudaMemcpy(sinogram_filtered_h, sinogram_filtered_d, MEM_SIZE_BINS_FLOATS, cudaMemcpyDeviceToHost);
	free(sinogram_h);
	cudaFree(sinogram_d);
	cudaFree(sinogram_filtered_d);
}
__global__ void filter_kernel( float* sinogram, float* sinogram_filtered )
{
	int t_bin_ref, angle_bin, t_bin, v_bin, t_bin_sep;
	float filtered, t, v, scale_factor;
	v_bin = blockIdx.x;
	angle_bin = blockIdx.y;
	t_bin = threadIdx.x;
	v = ( v_bin - V_BINS/2 ) * V_BIN_SIZE + V_BIN_SIZE/2.0;
	// Loop over the t bins of this strip (fixed v_bin and angle_bin)
	for( t_bin_ref = 0; t_bin_ref < T_BINS; t_bin_ref++ )
	{
		t = ( t_bin_ref - T_BINS/2 ) * T_BIN_SIZE + T_BIN_SIZE/2.0;
		t_bin_sep = t_bin - t_bin_ref;
		// scale_factor = r . path = cos(theta_{r,path})
		scale_factor = SOURCE_RADIUS / sqrtf( SOURCE_RADIUS * SOURCE_RADIUS + t * t + v * v );
		switch( FILTER_NUM )
		{
			case 0: // Ram-Lak
				if( t_bin_sep == 0 )
					filtered = 1.0 / ( 8.0 * powf( T_BIN_SIZE, 2.0 ) );
				else if( t_bin_sep % 2 == 0 )
					filtered = 0;
				else
					filtered = -1.0 / ( 2.0 * powf( T_BIN_SIZE * PI * t_bin_sep, 2.0 ) );
				break; // prevent falling through into the Shepp-Logan case
			case 1: // Shepp-Logan filter
				filtered = powf( powf(T_BIN_SIZE * PI, 2.0) * ( 1.0 - powf(2 * t_bin_sep, 2.0) ), -1.0 );
				break;
		}
		int strip_index = ( v_bin * ANGULAR_BINS * T_BINS ) + ( angle_bin * T_BINS );
		sinogram_filtered[strip_index + t_bin] += T_BIN_SIZE * sinogram[strip_index + t_bin_ref] * filtered * scale_factor;
	}
}
void backprojection()
{
	puts("Doing the backprojection...");
	printf("DEBUG: MEM_SIZE_IMAGE_FLOAT = %u\n", MEM_SIZE_IMAGE_FLOAT);
	// Allocate host memory
	puts("DEBUG: Allocate host memory");
	char user_response[20];
	X_h = (float*) calloc( VOXELS, sizeof(float) );
	if( X_h == NULL )
	{
		printf("ERROR: Memory not allocated for X_h!\n");
		fgets(user_response, sizeof(user_response), stdin);
		exit(1);
	}
	// Check that we don't have any corruptions up until now
	for( int i = 0; i < NUM_BINS; i++ )
		if( sinogram_filtered_h[i] != sinogram_filtered_h[i] )
			printf("We have a nan in bin #%d\n", i);
	float delta = GANTRY_ANGLE_INTERVAL * ANGLE_TO_RADIANS;
	// Loop over the voxels
	for( int slice = 0; slice < SLICES; slice++ )
	{
		for( int column = 0; column < COLUMNS; column++ )
		{
			for( int row = 0; row < ROWS; row++ )
			{
				// Initial Distance Into Voxel
				/* x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH;
				y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i] ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT;
				z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i] ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS;
				voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS);
				voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH );
				voxel_y_out = int( (
RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT ); voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS );*/ // Get the spatial co-ordinates of the pixel /* float x, y, z; if( column > COLUMNS/2 ) x = -RECON_CYL_RADIUS + ( column - 0.5 )* VOXEL_WIDTH; else if( column < COLUMNS/2 ) x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH; else x = -RECON_CYL_RADIUS + column* VOXEL_WIDTH; if( column > ROWS/2 ) y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT; else if( column < ROWS/2 ) y = RECON_CYL_RADIUS - (row - 0.5) * VOXEL_HEIGHT; else y = RECON_CYL_RADIUS - row * VOXEL_HEIGHT; z = -RECON_CYL_HEIGHT / 2.0 + (slice + 0.5) * SLICE_THICKNESS;*/ float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH; float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT; float z = -RECON_CYL_HEIGHT / 2.0 + (slice + 0.5) * SLICE_THICKNESS; //// If the voxel is outside a cylinder contained in the reconstruction volume, set to air if( ( x * x + y * y ) > ( RECON_CYL_RADIUS * RECON_CYL_RADIUS ) ) X_h[( slice * COLUMNS * ROWS) + ( row * COLUMNS ) + column] = 0.00113; else { // Sum over projection angles for( int angle_bin = 0; angle_bin < ANGULAR_BINS; angle_bin++ ) { // Rotate the pixel position to the beam-detector co-ordinate system float u = x * cosf( angle_bin * delta ) + y * sinf( angle_bin * delta ); float t = -x * sinf( angle_bin * delta ) + y * cosf( angle_bin * delta ); float v = z; // Project to find the detector number float detector_number_t = ( t - u *( t / ( SOURCE_RADIUS + u ) ) ) / T_BIN_SIZE + T_BINS/2.0; int t_bin = int( detector_number_t); if( t_bin > detector_number_t ) t_bin -= 1; float eta = detector_number_t - t_bin; // Now project v to get detector number in v axis float detector_number_v = ( v - u * ( v / ( SOURCE_RADIUS + u ) ) ) / V_BIN_SIZE + V_BINS/2.0; int v_bin = int( detector_number_v); if( v_bin > detector_number_v ) v_bin -= 1; float epsilon = detector_number_v - v_bin; // Calculate the fan beam scaling factor float scale_factor = powf( SOURCE_RADIUS / ( SOURCE_RADIUS + u ), 2 ); //bin_num[i] = t_bin + angle_bin * T_BINS + v_bin * T_BINS * ANGULAR_BINS; // Compute the back-projection int bin = t_bin + angle_bin * T_BINS + v_bin * ANGULAR_BINS * T_BINS; int voxel = slice * COLUMNS * ROWS + row * COLUMNS + column; // not sure why this won't compile without calculating the index ahead of time instead inside []s int index = ANGULAR_BINS * T_BINS; //if( ( ( bin + ANGULAR_BINS * T_BINS + 1 ) >= NUM_BINS ) || ( bin < 0 ) ); if( v_bin == V_BINS - 1 || ( bin < 0 ) ) { X_h[voxel] += delta * 2 *( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1]) * scale_factor; } //printf("The bin selected for this voxel does not exist!\n Slice: %d\n Column: %d\n Row: %d\n", slice, column, row); else { // not sure why this won't compile without calculating the index ahead of time instead inside []s /*X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1] + ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index] + eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor;*/ X_h[voxel] += delta * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1] + ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index] + eta * epsilon * sinogram_filtered_h[bin + index + 1] ) * scale_factor; // Multilpying by the gantry angle interval for each gantry angle is equivalent to multiplying 
the final answer by 2*PI and is better numerically // so multiplying by delta each time should be replaced by X_h[voxel] *= 2 * PI after all contributions have been made, which is commented out below /*X_h[voxel] += scale_factor * ( ( 1 - eta ) * ( 1 - epsilon ) * sinogram_filtered_h[bin] + eta * ( 1 - epsilon ) * sinogram_filtered_h[bin + 1] + ( 1 - eta ) * epsilon * sinogram_filtered_h[bin + index] + eta * epsilon * sinogram_filtered_h[bin + index + 1] );*/ if(X_h[voxel]!=X_h[voxel]) printf("We have a nan in slice %d, column %d, and row %d\n", slice, column, row); } //X_h[voxel] *= 2 * PI; } } } } } free(sinogram_filtered_h); FBP_object_h = (int*) calloc( COLUMNS * ROWS * SLICES, sizeof(int) ); for( int slice = 0; slice < SLICES; slice++ ) { for( int row = 0; row < ROWS; row++ ) { for( int column = 0; column < COLUMNS; column++ ) { float x = -RECON_CYL_RADIUS + ( column + 0.5 )* VOXEL_WIDTH; float y = RECON_CYL_RADIUS - (row + 0.5) * VOXEL_HEIGHT; float d_squared = powf(x, 2) + powf(y, 2); //if(X_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] > FBP_THRESHOLD && (d < powf(RECON_CYL_RADIUS-1.5, 2) ) ) if(X_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] > FBP_THRESHOLD && (d_squared < powf(RECON_CYL_RADIUS, 2) ) ) FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 1; else FBP_object_h[( slice * COLUMNS * ROWS ) + ( row * COLUMNS ) + column] = 0; } } } //// Set voxels outside reconstruction cylinder to zeros //for( int slice = 0; slice < SLICES; slice++ ) // for( int row = 0; row < ROWS; row++ ) // for( int column = 0; column < COLUMNS; column++ ) // {/* // float xv = ( column - COLUMNS/2 ) * VOXEL_WIDTH; // float yv = ( ROWS/2 - row ) * VOXEL_HEIGHT; // if( ( (xv * xv) + (yv * yv) ) >= float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) // FBP_object_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 0;*/ // float xv = int( column - COLUMNS/2 + 0.5); // float yv = int( ROWS/2 - row + 0.5); // if( ( (xv * xv) + (yv * yv) ) >= powf(COLUMNS/2, 2) ) // //if( ( (xv * xv) + (yv * yv) ) >= powf(COLUMNS/2- 3.0, 2) ) // FBP_object_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 0; // } //write_integer_array_to_files( "FBP_object", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES ); write_float_array_to_files( "X_h", output_directory, output_folder, X_h, COLUMNS, ROWS, SLICES ); write_integer_array_to_file( "x_FBP", output_directory, output_folder, FBP_object_h, COLUMNS, ROWS, SLICES ); } /************************************************************************************************************************************************************/ /****************************************************************** Image Initialization *******************************************************************/ /************************************************************************************************************************************************************/ void initialize_SC_hull( bool*& SC_hull_h, bool*& SC_hull_d ) { /* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */ /* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */ /* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. 
Assign */ /* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */ // Allocate memory for the hull image on the host and initialize to zeros SC_hull_h = (bool*)calloc( VOXELS, sizeof(bool)); float x, y; // Set the inner cylinder of the hull image to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) SC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = true; } // Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU cudaMalloc((void**) &SC_hull_d, MEM_SIZE_IMAGE_BOOL); cudaMemcpy(SC_hull_d, SC_hull_h, MEM_SIZE_IMAGE_BOOL, cudaMemcpyHostToDevice) ; } void initialize_MSC_hull( int*& MSC_hull_h, int*& MSC_hull_d ) { /* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */ /* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */ /* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. Assign */ /* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */ // Allocate memory for the hull image on the host and initialize to zeros MSC_hull_h = (int*)calloc( VOXELS, sizeof(int)); float x, y; // Set the inner cylinder of the hull image to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) MSC_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1; } // Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU cudaMalloc((void**) &MSC_hull_d, MEM_SIZE_IMAGE_INT); cudaMemcpy(MSC_hull_d, MSC_hull_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice) ; } void initialize_SM_hull( int*& SM_hull_h, int*& SM_hull_d ) { /* Allocate Memory and Initialize Images for Hull Detection Algorithms. Use the Image and */ /* Reconstruction Cylinder Parameters to Determine the Location of the Perimeter of the */ /* Reconstruction Cylinder, Which is Centered on the Origin (Center) of the Image. 
Assign */ /* Voxels Inside the Perimeter of the Reconstruction Volume the Value 1 and Those Outside 0 */ // Allocate memory for the hull image on the host and initialize to zeros SM_hull_h = (int*)calloc( VOXELS, sizeof(int)); float x, y; // Set the inner cylinder of the hull image to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < float(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) SM_hull_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1; } // Allocate memory for the initialized hull image on the GPU and then transfer it to the GPU cudaMalloc((void**) &SM_hull_d, MEM_SIZE_IMAGE_INT); cudaMemcpy(SM_hull_d, SM_hull_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice) ; } void initialize_float_image( float*& float_image_h, float*& float_image_d ) { //Create space carve object, init to zeros float_image_h = (float*)calloc( VOXELS, sizeof(float)); double x, y; // Set inner cylinder to 1s for( int slice = 0; slice < SLICES; slice++ ) for( int row = 0; row < ROWS; row++ ) for( int column = 0; column < COLUMNS; column++ ) { x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( ( (x * x) + (y * y) ) < double(RECON_CYL_RADIUS * RECON_CYL_RADIUS) ) float_image_h[slice * COLUMNS * ROWS + row * COLUMNS + column] = 1; } cudaMalloc((void**) &float_image_d, MEM_SIZE_IMAGE_FLOAT); cudaMemcpy(float_image_d, float_image_h, MEM_SIZE_IMAGE_FLOAT, cudaMemcpyHostToDevice) ; } /************************************************************************************************************************************************************/ /******************************************************************* Hull Detection *************************************************************************/ /************************************************************************************************************************************************************/ __device__ void voxel_walk( bool*& image, float x_entry, float y_entry, float z_entry, float x_exit, float y_exit, float z_exit ) { /********************************************************************************************/ /********************************* Voxel Walk Parameters ************************************/ /********************************************************************************************/ int x_move_direction, y_move_direction, z_move_direction; int x_voxel_step, y_voxel_step, z_voxel_step; float delta_x, delta_y, delta_z; float x_move, y_move, z_move; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ float x, y, z; float x_inside, y_inside, z_inside; float x_to_go, y_to_go, z_to_go; float x_extension, y_extension; float voxel_x, voxel_y, voxel_z; float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out; int voxel; bool outside_image, end_walk; /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( 
x_entry + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH; y_inside = modf( ( RECON_CYL_RADIUS - y_entry ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT; z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); voxel_x_out = int( ( x_exit + RECON_CYL_RADIUS ) /VOXEL_WIDTH ); voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit ) /VOXEL_HEIGHT ); voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit ) /VOXEL_THICKNESS ); voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = VOXEL_WIDTH; delta_y = abs( (y_exit - y_entry)/(x_exit - x_entry) * VOXEL_WIDTH ); delta_z = abs( (z_exit - z_entry)/(x_exit - x_entry) * VOXEL_WIDTH ); // Overwrite NaN if Divisors on delta_i Calculations Above if( x_entry == x_exit ) { delta_x = abs( (x_exit - x_entry)/(y_exit - y_entry) * VOXEL_HEIGHT ); delta_y = VOXEL_HEIGHT; delta_z = abs( (z_exit - z_entry)/(y_exit - y_entry) * VOXEL_HEIGHT ); if( y_entry == y_exit ) { delta_x = abs( (x_exit - x_entry)/(z_exit - z_entry) * VOXEL_THICKNESS ); delta_y = abs( (y_exit - y_entry)/(z_exit - z_entry) * VOXEL_THICKNESS );; delta_z = VOXEL_THICKNESS; } } x_move = 0, y_move = 0, z_move = 0; x_move_direction = ( x_entry <= x_exit ) - ( x_entry > x_exit ); y_move_direction = ( y_entry <= y_exit ) - ( y_entry > y_exit ); z_move_direction = ( z_entry <= z_exit ) - ( z_entry > z_exit ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_entry, y = y_entry, z = z_entry; x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside; outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) image[voxel] = 0; end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); /********************************************************************************************/ /*********************************** Voxel Walk Routine *************************************/ /********************************************************************************************/ if( z_entry != z_exit ) { while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; 
x_to_go = VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = delta_z / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; //fgets(user_response, sizeof(user_response), stdin); voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) image[voxel] = 0; end_walk = ( voxel == voxel_out ) || outside_image; } } else { //printf("z_exit == z_entry\n"); while( !end_walk ) { // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) image[voxel] = 0; end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_entry_h != z_exit_h => z_entry_h == z_exit_h } void SC( int num_histories ) { dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); SC_kernel<<<dimGrid, dimBlock>>> ( num_histories, SC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d ); } __global__ void SC_kernel ( int num_histories, bool* SC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= SC_THRESHOLD) && (bin_num[i] >= 0) ) { voxel_walk( SC_image, x_entry[i], y_entry[i], z_entry[i], x_exit[i], y_exit[i], z_exit[i] ); }// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= PURE_SC_THRESH) && (bin_num[i] >= 0) ) } /************************************************************************************************************************************************************/ void MSC( int num_histories ) { dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); MSC_kernel<<<dimGrid, dimBlock>>> ( num_histories, MSC_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d, x_entry_d, 
y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d ); } __global__ void MSC_kernel ( int num_histories, int* MSC_image, int* bin_num, bool* traversed_recon_volume, float* WEPL, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] < MSC_THRESHOLD) && (bin_num[i] >= 0) ) { //char user_response[20]; /********************************************************************************************/ /********************************* Voxel Walk Parameters ************************************/ /********************************************************************************************/ int x_move_direction, y_move_direction, z_move_direction; int x_voxel_step, y_voxel_step, z_voxel_step; float delta_x, delta_y, delta_z; float x_move, y_move, z_move; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ float x, y, z; float x_inside, y_inside, z_inside; float x_to_go, y_to_go, z_to_go; float x_extension, y_extension; float voxel_x, voxel_y, voxel_z; float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out; int voxel; bool outside_image, end_walk; /********************************************************************************************/ /************************** Initial and Boundary Conditions *********************************/ /********************************************************************************************/ // Initial Distance Into Voxel x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH; y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i]) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT; z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i]) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH ); voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT ); voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS ); voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS); /********************************************************************************************/ /***************************** Path and Walk Information ************************************/ /********************************************************************************************/ // Lengths/Distances as x is Incremented One Voxel delta_x = VOXEL_WIDTH; delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); // Overwrite NaN if Divisors on delta_i Calculations Above if( x_entry[i] == x_exit[i] ) { delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); delta_y = VOXEL_HEIGHT; delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); if( y_entry[i] == y_exit[i] ) { delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS ); delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );; delta_z = VOXEL_THICKNESS; } } x_move = 0, y_move = 0, z_move = 0; x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] ); 
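        // The expression ( a <= b ) - ( a > b ) evaluates to +1 or -1, giving the direction of travel
        // along each axis; the y and z voxel steps below are negated because row and slice indices
        // increase as y and z decrease in the image coordinate system.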
y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] ); z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] ); x_voxel_step = x_move_direction; y_voxel_step = -y_move_direction; z_voxel_step = -z_move_direction; /********************************************************************************************/ /**************************** Status Tracking Information ***********************************/ /********************************************************************************************/ x = x_entry[i], y = y_entry[i], z = z_entry[i]; x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside; y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside; z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside; outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) atomicAdd( &MSC_image[voxel], 1 ); end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); /********************************************************************************************/ /*********************************** Voxel Walk Routine *************************************/ /********************************************************************************************/ if( z_entry[i] != z_exit[i] ) { while( !end_walk ) { // Change in z for Move to Voxel Edge in x and y x_extension = delta_z/delta_x * x_to_go; y_extension = delta_z/delta_y * y_to_go; if( z_to_go <= x_extension && z_to_go <= y_extension ) { //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); x_move = delta_x / delta_z * z_to_go; y_move = delta_y / delta_z * z_to_go; z_move = z_to_go; x_to_go -= x_move; y_to_go -= y_move; z_to_go = VOXEL_THICKNESS; voxel_z += z_voxel_step; if( x_to_go == 0 ) { voxel_x += x_voxel_step; x_to_go = VOXEL_WIDTH; } if( y_to_go == 0 ) { voxel_y += y_voxel_step; y_to_go = VOXEL_HEIGHT; } } //If Next Voxel Edge is in x or xy Diagonal else if( x_extension <= y_extension ) { //printf(" x_extension <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; z_move = delta_z / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; z_to_go -= z_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; z_move = delta_z / delta_y * y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; z_to_go -= z_move; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; z += z_move_direction * z_move; //fgets(user_response, sizeof(user_response), stdin); voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) atomicAdd( &MSC_image[voxel], 1 ); end_walk = ( voxel == voxel_out ) || outside_image; } } else { //printf("z_exit[i] == z_entry[i]\n"); while( !end_walk ) { // Change in x for Move to Voxel Edge in y y_extension = delta_x/delta_y * y_to_go; //If Next Voxel Edge is in x or xy Diagonal if( x_to_go <= y_extension ) { //printf(" x_to_go <= y_extension \n"); x_move = x_to_go; y_move = delta_y / delta_x * x_to_go; x_to_go = VOXEL_WIDTH; y_to_go -= y_move; voxel_x += x_voxel_step; if( y_to_go == 0 ) { y_to_go = VOXEL_HEIGHT; voxel_y += 
y_voxel_step; } } // Else Next Voxel Edge is in y else { //printf(" y_extension < x_extension \n"); x_move = delta_x / delta_y * y_to_go; y_move = y_to_go; x_to_go -= x_move; y_to_go = VOXEL_HEIGHT; voxel_y += y_voxel_step; } x += x_move_direction * x_move; y += y_move_direction * y_move; voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); if( !outside_image ) atomicAdd( &MSC_image[voxel], 1 ); end_walk = ( voxel == voxel_out ) || outside_image; //fgets(user_response, sizeof(user_response), stdin); }// end: while( !end_walk ) }//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i] }// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] <= PURE_SC_THRESH) && (bin_num[i] >= 0) ) } void MSC_threshold() { cudaMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost); write_integer_array_to_files("MSC_image", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES ); dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); MSC_threshold_kernel<<< dimGrid, dimBlock >>>( MSC_image_d ); cudaMemcpy(MSC_image_h, MSC_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost); write_integer_array_to_files("MSC_image_thresholded", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES ); write_integer_array_to_file("x_MSC", output_directory, output_folder, MSC_image_h, COLUMNS, ROWS, SLICES ); cudaFree( MSC_image_d ); free(MSC_image_h); } __global__ void MSC_threshold_kernel( int* MSC_image ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; int difference, max_difference = 0; if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) ) { for( int current_row = row - 1; current_row <= row + 1; current_row++ ) { for( int current_column = column - 1; current_column <= column + 1; current_column++ ) { difference = MSC_image[voxel] - MSC_image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS]; if( difference > max_difference ) max_difference = difference; } } } syncthreads(); if( max_difference > MSC_DIFF_THRESH ) MSC_image[voxel] = 0; else if( MSC_image[voxel] == 0 ) MSC_image[voxel] = 0; else MSC_image[voxel] = 1; if( x * x + y * y > RECON_CYL_RADIUS * RECON_CYL_RADIUS ) MSC_image[voxel] = 0; } /************************************************************************************************************************************************************/ void SM( int num_histories) { dim3 dimBlock(THREADS_PER_BLOCK); dim3 dimGrid((int)(num_histories/THREADS_PER_BLOCK)+1); SM_kernel<<<dimGrid, dimBlock>>> ( num_histories, SM_image_d, bin_num_d, traversed_recon_volume_d, WEPL_d, x_entry_d, y_entry_d, z_entry_d, x_exit_d, y_exit_d, z_exit_d ); } __global__ void SM_kernel ( int num_histories, int* SM_image, int* bin_num, bool* traversed_recon_volume, float* WEPL, float* x_entry, float* y_entry, float* z_entry, float* x_exit, float* y_exit, float* z_exit ) { int i = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK; //if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SM_LOWER_THRESHOLD) && (bin_num[i] >= 0) ) //{ // //char user_response[20]; // /********************************************************************************************/ // /********************************* Voxel Walk Parameters 
************************************/ // /********************************************************************************************/ // int x_move_direction, y_move_direction, z_move_direction; // int x_voxel_step, y_voxel_step, z_voxel_step; // float delta_x, delta_y, delta_z; // float x_move, y_move, z_move; // /********************************************************************************************/ // /**************************** Status Tracking Information ***********************************/ // /********************************************************************************************/ // float x, y, z; // float x_inside, y_inside, z_inside; // float x_to_go, y_to_go, z_to_go; // float x_extension, y_extension; // float voxel_x, voxel_y, voxel_z; // float voxel_x_out, voxel_y_out, voxel_z_out, voxel_out; // int voxel; // bool outside_image, end_walk; // /********************************************************************************************/ // /************************** Initial and Boundary Conditions *********************************/ // /********************************************************************************************/ // // Initial Distance Into Voxel // x_inside = modf( ( x_entry[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH, &voxel_x)*VOXEL_WIDTH; // y_inside = modf( ( RECON_CYL_RADIUS - y_entry[i] ) /VOXEL_HEIGHT, &voxel_y)*VOXEL_HEIGHT; // z_inside = modf( ( RECON_CYL_HEIGHT/2 - z_entry[i] ) /VOXEL_THICKNESS, &voxel_z)*VOXEL_THICKNESS; // voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); // voxel_x_out = int( ( x_exit[i] + RECON_CYL_RADIUS ) /VOXEL_WIDTH ); // voxel_y_out = int( ( RECON_CYL_RADIUS - y_exit[i] ) /VOXEL_HEIGHT ); // voxel_z_out = int( ( RECON_CYL_HEIGHT/2 - z_exit[i] ) /VOXEL_THICKNESS ); // voxel_out = int(voxel_x_out + voxel_y_out * COLUMNS + voxel_z_out * COLUMNS * ROWS); // /********************************************************************************************/ // /***************************** Path and Walk Information ************************************/ // /********************************************************************************************/ // // Lengths/Distances as x is Incremented One Voxel // delta_x = VOXEL_WIDTH; // delta_y = abs( (y_exit[i] - y_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); // delta_z = abs( (z_exit[i] - z_entry[i])/(x_exit[i] - x_entry[i]) * VOXEL_WIDTH ); // // Overwrite NaN if Divisors on delta_i Calculations Above // if( x_entry[i] == x_exit[i] ) // { // delta_x = abs( (x_exit[i] - x_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); // delta_y = VOXEL_HEIGHT; // delta_z = abs( (z_exit[i] - z_entry[i])/(y_exit[i] - y_entry[i]) * VOXEL_HEIGHT ); // if( y_entry[i] == y_exit[i] ) // { // delta_x = abs( (x_exit[i] - x_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS ); // delta_y = abs( (y_exit[i] - y_entry[i])/(z_exit[i] - z_entry[i]) * VOXEL_THICKNESS );; // delta_z = VOXEL_THICKNESS; // } // } // x_move = 0, y_move = 0, z_move = 0; // x_move_direction = ( x_entry[i] <= x_exit[i] ) - ( x_entry[i] > x_exit[i] ); // y_move_direction = ( y_entry[i] <= y_exit[i] ) - ( y_entry[i] > y_exit[i] ); // z_move_direction = ( z_entry[i] <= z_exit[i] ) - ( z_entry[i] > z_exit[i] ); // x_voxel_step = x_move_direction; // y_voxel_step = -y_move_direction; // z_voxel_step = -z_move_direction; // /********************************************************************************************/ // /**************************** Status Tracking Information 
***********************************/ // /********************************************************************************************/ // x = x_entry[i], y = y_entry[i], z = z_entry[i]; // x_to_go = ( x_voxel_step > 0 ) * (VOXEL_WIDTH - x_inside) + ( x_voxel_step <= 0 ) * x_inside; // y_to_go = ( y_voxel_step > 0 ) * (VOXEL_HEIGHT - y_inside) + ( y_voxel_step <= 0 ) * y_inside; // z_to_go = ( z_voxel_step > 0 ) * (VOXEL_THICKNESS - z_inside) + ( z_voxel_step <= 0 ) * z_inside; // // outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); // if( !outside_image ) // atomicAdd( &SM_image[voxel], 1 ); // end_walk = ( voxel == voxel_out ) || outside_image; // //fgets(user_response, sizeof(user_response), stdin); // /********************************************************************************************/ // /*********************************** Voxel Walk Routine *************************************/ // /********************************************************************************************/ // if( z_entry[i] != z_exit[i] ) // { // while( !end_walk ) // { // // Change in z for Move to Voxel Edge in x and y // x_extension = delta_z/delta_x * x_to_go; // y_extension = delta_z/delta_y * y_to_go; // if( z_to_go <= x_extension && z_to_go <= y_extension ) // { // //printf("z_to_go <= x_extension && z_to_go <= y_extension\n"); // x_move = delta_x / delta_z * z_to_go; // y_move = delta_y / delta_z * z_to_go; // z_move = z_to_go; // x_to_go -= x_move; // y_to_go -= y_move; // z_to_go = VOXEL_THICKNESS; // voxel_z += z_voxel_step; // if( x_to_go == 0 ) // { // voxel_x += x_voxel_step; // x_to_go = VOXEL_WIDTH; // } // if( y_to_go == 0 ) // { // voxel_y += y_voxel_step; // y_to_go = VOXEL_HEIGHT; // } // } // //If Next Voxel Edge is in x or xy Diagonal // else if( x_extension <= y_extension ) // { // //printf(" x_extension <= y_extension \n"); // x_move = x_to_go; // y_move = delta_y / delta_x * x_to_go; // z_move = delta_z / delta_x * x_to_go; // x_to_go = VOXEL_WIDTH; // y_to_go -= y_move; // z_to_go -= z_move; // voxel_x += x_voxel_step; // if( y_to_go == 0 ) // { // y_to_go = VOXEL_HEIGHT; // voxel_y += y_voxel_step; // } // } // // Else Next Voxel Edge is in y // else // { // //printf(" y_extension < x_extension \n"); // x_move = delta_x / delta_y * y_to_go; // y_move = y_to_go; // z_move = delta_z / delta_y * y_to_go; // x_to_go -= x_move; // y_to_go = VOXEL_HEIGHT; // z_to_go -= z_move; // voxel_y += y_voxel_step; // } // x += x_move_direction * x_move; // y += y_move_direction * y_move; // z += z_move_direction * z_move; // //fgets(user_response, sizeof(user_response), stdin); // voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); // outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); // if( !outside_image ) // atomicAdd( &SM_image[voxel], 1 ); // end_walk = ( voxel == voxel_out ) || outside_image; // } // } // else // { // //printf("z_exit[i] == z_entry[i]\n"); // while( !end_walk ) // { // // Change in x for Move to Voxel Edge in y // y_extension = delta_x/delta_y * y_to_go; // //If Next Voxel Edge is in x or xy Diagonal // if( x_to_go <= y_extension ) // { // //printf(" x_to_go <= y_extension \n"); // x_move = x_to_go; // y_move = delta_y / delta_x * x_to_go; // x_to_go = VOXEL_WIDTH; // y_to_go -= y_move; // voxel_x += x_voxel_step; // if( y_to_go == 0 ) // { // y_to_go = VOXEL_HEIGHT; // voxel_y += y_voxel_step; // } // } // // Else Next Voxel Edge is in y // else // { // //printf(" y_extension < 
x_extension \n"); // x_move = delta_x / delta_y * y_to_go; // y_move = y_to_go; // x_to_go -= x_move; // y_to_go = VOXEL_HEIGHT; // voxel_y += y_voxel_step; // } // x += x_move_direction * x_move; // y += y_move_direction * y_move; // voxel = int(voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS); // outside_image = ( voxel_x >= COLUMNS ) || ( voxel_y >= ROWS ) || ( voxel_z >= SLICES ); // if( !outside_image ) // atomicAdd( &SM_image[voxel], 1 ); // end_walk = ( voxel == voxel_out ) || outside_image; // //fgets(user_response, sizeof(user_response), stdin); // }// end: while( !end_walk ) // }//end: else: z_entry[i] != z_exit[i] => z_entry[i] == z_exit[i] //}// end: if( (i < num_histories) && traversed_recon_volume[i] && (WEPL[i] >= SPACE_MODEL_LOWER_THRESHOLD) && (WEPL[i] <= SPACE_MODEL_UPPER_THRESHOLD) && (bin_num[i] >= 0) ) } void MSC_differences() { int* MSC_differences_h = (int*) calloc( VOXELS, sizeof(int) ); int* MSC_differences_d; cudaMalloc((void**) &MSC_differences_d, MEM_SIZE_IMAGE_INT ); cudaMemcpy( MSC_differences_d, MSC_differences_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice ); dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); MSC_differences_kernel<<< dimGrid, dimBlock >>>( MSC_image_d, MSC_differences_d ); } void SM_differences() { int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) ); int* SM_differences_d; cudaMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT ); cudaMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice ); SM_differences_kernel<<< ( COLUMNS, ROWS ), SLICES >>>( SM_image_d, SM_differences_d ); SM_threshold_search_kernel<<< ( COLUMNS, ROWS ), SLICES >>>( SM_image_d, SM_differences_d ); } __global__ void SM_differences_kernel( int* SM_image, int* SM_differences) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( voxel < VOXELS && row < COLUMNS - 1) { SM_differences[voxel] = abs(SM_image[voxel] - SM_image[voxel - ROWS]); if( column < COLUMNS - 1 ) { int difference_right = abs(SM_image[voxel] - SM_image[voxel + 1]); if( difference_right > SM_differences[voxel] ) SM_differences[voxel] = -difference_right; } } /*syncthreads(); int max_difference = 0; int index = 0; voxel = slice * COLUMNS * ROWS; for( ; voxel < voxel + COLUMNS * ROWS; voxel++ ) { if( SM_differences[voxel] > max_difference ) { max_difference = SM_differences[voxel]; index = voxel; } } int threshold = 0; bool down = SM_differences[index] > 0; if( down ) threshold = max(SM_image[index], SM_image[index + COLUMNS]); else threshold = max(SM_image[index], SM_image[index + 1]); syncthreads(); voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( SM_image[voxel] >= SM_THRESHOLD_MULTIPLIER * threshold ) SM_image[voxel] = 0; else SM_image[voxel] = 1;*/ } __global__ void SM_threshold_search_kernel( int* SM_image, int* SM_differences ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int max_difference = 0; int index = 0; int voxel = slice * COLUMNS * ROWS; for( ; voxel < voxel + COLUMNS * ROWS; voxel++ ) { if( SM_differences[voxel] > max_difference ) { max_difference = SM_differences[voxel]; index = voxel; } } int threshold = 0; bool down = SM_differences[index] > 0; if( down ) threshold = max(SM_image[index], SM_image[index + COLUMNS]); else threshold = max(SM_image[index], SM_image[index + 1]); syncthreads(); voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( SM_image[voxel] >= SM_THRESHOLD_MULTIPLIER * threshold ) SM_image[voxel] = 0; else 
SM_image[voxel] = 1; } __global__ void MSC_differences_kernel( int* MSC_image, int* MSC_differences) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( voxel < VOXELS && row < COLUMNS - 1) { float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; if( MSC_image[voxel] - MSC_image[voxel - ROWS] > MSC_DIFF_THRESH ) MSC_image[voxel] = 0; else if( MSC_image[voxel - ROWS] - MSC_image[voxel] > MSC_DIFF_THRESH ) MSC_image[voxel - ROWS] = 0; else if( column < COLUMNS - 1 ) { if( MSC_image[voxel] - MSC_image[voxel + 1] > MSC_DIFF_THRESH ) MSC_image[voxel] = 0; else if( MSC_image[voxel + 1] - MSC_image[voxel] > MSC_DIFF_THRESH ) MSC_image[voxel + 1] = 0; } else MSC_image[voxel] = 1; if( x * x + y * y > RECON_CYL_RADIUS * RECON_CYL_RADIUS ) MSC_image[voxel] = 0; } } void SM_threshold() { // Copy the space modeled image from the GPU to the CPU and write it to file. cudaMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost); write_integer_array_to_files("SM_image", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES ); int* SM_differences_h = (int*) calloc( VOXELS, sizeof(int) ); int* SM_differences_d; cudaMalloc((void**) &SM_differences_d, MEM_SIZE_IMAGE_INT ); cudaMemcpy( SM_differences_d, SM_differences_h, MEM_SIZE_IMAGE_INT, cudaMemcpyHostToDevice ); dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); carve_differences<<< dimGrid, dimBlock >>>( SM_differences_d, SM_image_d ); cudaMemcpy( SM_differences_h, SM_differences_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost ); int* SM_thresholds_h = (int*) calloc( SLICES, sizeof(int) ); int voxel; int max_difference = 0; for( int slice = 0; slice < SLICES; slice++ ) { for( int pixel = 0; pixel < COLUMNS * ROWS; pixel++ ) { voxel = pixel + slice * COLUMNS * ROWS; if( SM_differences_h[voxel] > max_difference ) { max_difference = SM_differences_h[voxel]; SM_thresholds_h[slice] = SM_image_h[voxel]; } } printf( "Slice %d : The maximum space_model difference = %d and the space_model threshold = %d\n", slice, max_difference, SM_thresholds_h[slice] ); max_difference = 0; } int* SM_thresholds_d; unsigned int threshold_size = SLICES * sizeof(int); cudaMalloc((void**) &SM_thresholds_d, threshold_size ); cudaMemcpy( SM_thresholds_d, SM_thresholds_h, threshold_size, cudaMemcpyHostToDevice ); SM_threshold_kernel<<< dimGrid, dimBlock >>>( SM_image_d, SM_thresholds_d); cudaMemcpy(SM_image_h, SM_image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost); //write_integer_array_to_files("space_model_thresholded", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES ); write_integer_array_to_file("x_SM", output_directory, output_folder, SM_image_h, COLUMNS, ROWS, SLICES ); cudaFree( SM_differences_d ); cudaFree( SM_thresholds_d ); cudaFree( SM_image_d ); free(SM_differences_h); free(SM_thresholds_h); free(SM_image_h); } __global__ void SM_threshold_kernel( int* SM_image, int* SM_threshold ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; float x = ( column - COLUMNS/2 + 0.5 ) * VOXEL_WIDTH; float y = ( ROWS/2 - row - 0.5 ) * VOXEL_HEIGHT; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( voxel < VOXELS ) { if( SM_image[voxel] > 1.0 * SM_threshold[slice] ) SM_image[voxel] = 1; else SM_image[voxel] = 0; if( x * x + y * y > RECON_CYL_RADIUS * RECON_CYL_RADIUS ) SM_image[voxel] = 0; } } 
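// NOTE: carve_differences() below records, for each interior voxel, the largest positive difference
// between its value and any of its in-slice neighbors; SM_threshold() then finds the per-slice maximum
// of these differences and uses the image value at that location as the threshold for binarizing the
// space model image.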
/************************************************************************************************************************************************************/ __global__ void carve_differences( int* carve_differences, int* image ) { int row = blockIdx.y, column = blockIdx.x, slice = threadIdx.x; int voxel = column + row * COLUMNS + slice * COLUMNS * ROWS; if( (row != 0) && (row != ROWS - 1) && (column != 0) && (column != COLUMNS - 1) ) { int difference, max_difference = 0; for( int current_row = row - 1; current_row <= row + 1; current_row++ ) { for( int current_column = column - 1; current_column <= column + 1; current_column++ ) { difference = image[voxel] - image[current_column + current_row * COLUMNS + slice * COLUMNS * ROWS]; if( difference > max_difference ) max_difference = difference; } } carve_differences[voxel] = max_difference; } } void averaging_filter( bool*& image_h, bool*& image_d, int filter_size ) { initialize_SC_hull(image_h, image_d); float threshold = 0; dim3 dimBlock( SLICES ); dim3 dimGrid( COLUMNS, ROWS ); averaging_filter_kernel<<< dimGrid, dimBlock >>>( image_d, filter_size, threshold); cudaMemcpy(image_h, image_d, MEM_SIZE_IMAGE_INT, cudaMemcpyDeviceToHost) ; write_bool_array_to_file( "test", output_directory, output_folder, image_h, COLUMNS, ROWS, SLICES ); } __global__ void averaging_filter_kernel( bool* image, int filter_size, float threshold ) { int voxel_x = blockIdx.x; int voxel_y = blockIdx.y; int voxel_z = threadIdx.x; int voxel = voxel_x + voxel_y * COLUMNS + voxel_z * COLUMNS * ROWS; int sum = image[voxel]; if( (voxel_x > 0) && (voxel_y > 0) && (voxel_x < COLUMNS - 1) && (voxel_y < ROWS - 1) ) { for( int i = voxel_x - filter_size/2; i <= voxel_x + filter_size/2; i++ ) for( int j = voxel_y - filter_size/2; j <= voxel_y + filter_size/2; j++ ) sum += image[i + j * COLUMNS + voxel_z * COLUMNS * ROWS]; } //value[voxel] = sum > threshold; syncthreads(); image[voxel] = sum > threshold; } /************************************************************************************************************************************************************/ /******************************************************** Memory Transfers, Maintenance, and Cleaning *******************************************************/ /************************************************************************************************************************************************************/ void initial_processing_memory_clean() { free( gantry_angle_h ); cudaFree( x_entry_d ); cudaFree( y_entry_d ); cudaFree( z_entry_d ); cudaFree( x_exit_d ); cudaFree( y_exit_d ); cudaFree( z_exit_d ); cudaFree( traversed_recon_volume_d ); cudaFree( bin_num_d ); cudaFree( WEPL_d); } void post_cut_memory_clean() { free(passed_cuts_h ); free(stddev_rel_ut_angle_h); free(stddev_rel_uv_angle_h); free(stddev_WEPL_h); cudaFree( passed_cuts_d ); cudaFree( bin_num_d ); cudaFree( WEPL_d ); cudaFree( xy_entry_angle_d ); cudaFree( xz_entry_angle_d ); //cudaFree( xy_exit_angle_d ); //cudaFree( xz_exit_angle_d ); cudaFree( relative_ut_angle_d ); cudaFree( relative_uv_angle_d ); cudaFree( mean_rel_ut_angle_d ); cudaFree( mean_rel_uv_angle_d ); cudaFree( mean_WEPL_d ); cudaFree( stddev_rel_ut_angle_d ); cudaFree( stddev_rel_uv_angle_d ); cudaFree( stddev_WEPL_d ); } void resize_vectors( int new_size ) { bin_num_vector.resize( new_size ); //gantry_angle_vector.resize( new_size ); WEPL_vector.resize( new_size ); x_entry_vector.resize( new_size ); y_entry_vector.resize( new_size ); z_entry_vector.resize( new_size ); 
x_exit_vector.resize( new_size ); y_exit_vector.resize( new_size ); z_exit_vector.resize( new_size ); xy_entry_angle_vector.resize( new_size ); xz_entry_angle_vector.resize( new_size ); //xy_exit_angle_vector.resize( new_size ); //xz_exit_angle_vector.resize( new_size ); relative_ut_angle_vector.resize( new_size ); relative_uv_angle_vector.resize( new_size ); } void shrink_vectors( int new_capacity ) { bin_num_vector.shrink_to_fit(); //gantry_angle_vector.shrink_to_fit(); WEPL_vector.shrink_to_fit(); x_entry_vector.shrink_to_fit(); y_entry_vector.shrink_to_fit(); z_entry_vector.shrink_to_fit(); x_exit_vector.shrink_to_fit(); y_exit_vector.shrink_to_fit(); z_exit_vector.shrink_to_fit(); xy_entry_angle_vector.shrink_to_fit(); xz_entry_angle_vector.shrink_to_fit(); //xy_exit_angle_vector.shrink_to_fit(); //xz_exit_angle_vector.shrink_to_fit(); relative_ut_angle_vector.shrink_to_fit(); relative_uv_angle_vector.shrink_to_fit(); } /************************************************************************************************************************************************************/ /****************************************************** Routines for Writing Data Arrays/Vectors to Disk ****************************************************/ /************************************************************************************************************************************************************/ void write_bool_array_to_files( char* output_filename_base, const char* output_directory, const char* output_folder, bool* bool_array, int x_max, int y_max, int z_max ) { char output_filename[256]; // Write each slice of the array/image to a separate file for(int z = 0; z < z_max; z++) { ofstream output_file; sprintf( output_filename, "%s%s/%s_%d.txt", output_directory, output_folder, output_filename_base, z ); output_file.open(output_filename); for(int y = 0; y < y_max; y++) { for(int x = 0; x < x_max; x++) output_file << bool_array[(z*x_max*y_max)+(y*x_max)+x] << " "; output_file << endl; } output_file.close(); } } void write_bool_array_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, bool* bool_array, int x_max, int y_max, int z_max ) { // Write each slice of the array/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << bool_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } void write_integer_array_to_files( char* output_filename_base, const char* output_directory, const char* output_folder, int* integer_array, int x_max, int y_max, int z_max ) { char output_filename[256]; // Write each slice of the array/image to a separate file for(int z = 0; z < z_max; z++) { ofstream output_file; sprintf( output_filename, "%s%s/%s_%d.txt", output_directory, output_folder, output_filename_base, z ); output_file.open(output_filename); for(int y = 0; y < y_max; y++) { for(int x = 0; x < x_max; x++) output_file << integer_array[(z*x_max*y_max)+(y*x_max)+x] << " "; output_file << endl; } output_file.close(); } } void write_integer_array_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, int* integer_array, int x_max, int y_max, int z_max ) { // Write each slice of the array/image to a single file 
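    // Voxels are stored in row-major order with x varying fastest, so the element for (x, y, z) lives at
    // index ( z * x_max * y_max ) + ( y * x_max ) + x; each y row is written as one whitespace-separated
    // line of the output file.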
ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << integer_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } void write_float_array_to_files( char* output_filename_base, const char* output_directory, const char* output_folder, float* &float_array, int x_max, int y_max, int z_max ) { char output_filename[256]; // Write each slice of the array/image to a separate file for(int z = 0; z < z_max; z++) { ofstream output_file; sprintf( output_filename, "%s%s/%s_%d.txt", output_directory, output_folder, output_filename_base, z ); output_file.open(output_filename); for(int y = 0; y < y_max; y++) { for(int x = 0; x < x_max; x++) output_file << float_array[ ( z * x_max * y_max ) + ( y * x_max ) + x ] << " "; output_file << endl; } output_file.close(); } } void write_float_array_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, float* float_array, int x_max, int y_max, int z_max ) { // Write each slice of the array/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << float_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } void write_float_vector_to_file( char* output_filename_base, const char* output_directory, const char* output_folder, vector<float> vector_array, int x_max, int y_max, int z_max ) { // Write each slice of the vector/image to a single file ofstream output_file; char output_filename[256]; sprintf( output_filename, "%s%s/%s.txt", output_directory, output_folder, output_filename_base ); output_file.open(output_filename); for( int z = 0; z < z_max; z++ ) { for( int y = 0; y < y_max; y++ ) { for( int x = 0; x < x_max; x++ ) output_file << vector_array[( z * x_max * y_max ) + ( y * x_max ) + x] << " "; output_file << endl; } }//607,999 output_file.close(); } /************************************************************************************************************************************************************/ /********************************************************************* Helper Functions *********************************************************************/ /************************************************************************************************************************************************************/ bool bad_data_angle( int angle ) { static const int bad_angles_array[] = {80, 84, 88, 92, 96, 100, 00, 180, 260, 264, 268, 272, 276}; vector<int> bad_angles(bad_angles_array, bad_angles_array + sizeof(bad_angles_array) / sizeof(bad_angles_array[0]) ); bool bad_angle = false; for( int i = 0; i < bad_angles.size(); i++ ) if( angle == bad_angles[i] ) bad_angle = true; return bad_angle; } int calculate_x_voxel(float x_position, int x_voxels, float voxel_width ) { // -10 100 1 [-50 49] -40 float x_width = x_voxels * voxel_width;//100 float x_range = x_width/2;//50 return ( x_position + x_range) / voxel_width;//-10+50/1 = 40 //[0 99] } int calculate_y_voxel(float y_position, int y_voxels, float 
voxel_height ) { // 10 100 1 [-50 49] 40 float y_width = y_voxels * voxel_height;//100 float y_range = y_width/2;//50 return ( y_range - y_position ) / voxel_height; } int calculate_slice(float z_position, int z_voxels, float voxel_thickness ) { // -10 100 1 [-50 49] -40 float z_width = z_voxels * voxel_thickness;//100 float z_range = z_width/2;//50 return ( z_range - z_position ) / voxel_thickness; } /************************************************************************************************************************************************************/ /****************************************************************** Testing Functions ***********************************************************************/ /************************************************************************************************************************************************************/ void test_func() { char user_response[20]; //fgets(user_response, sizeof(user_response), stdin); bool* passed_cuts_h = (bool*)calloc (30, sizeof(bool)); for( int i = 0; i < 30; i++ ) { bin_num_vector.push_back(i); WEPL_vector.push_back(i); x_entry_vector.push_back(i); y_entry_vector.push_back(i); z_entry_vector.push_back(i); x_exit_vector.push_back(i); y_exit_vector.push_back(i); z_exit_vector.push_back(i); xy_entry_angle_vector.push_back(i); xz_entry_angle_vector.push_back(i); xy_exit_angle_vector.push_back(i); xz_exit_angle_vector.push_back(i); passed_cuts_h[i] = i%2; } for( int i = 0; i < 30; i++ ) { printf("bin_num_vector[%d] = %d\n", i, bin_num_vector[i]); printf("WEPL_vector[%d] = %3f\n", i, WEPL_vector[i]); printf("x_entry_vector[%d] = %3f\n", i, x_entry_vector[i]); printf("y_entry_vector[%d] = %3f\n", i, y_entry_vector[i]); printf("z_entry_vector[%d] = %3f\n", i, z_entry_vector[i]); printf("x_exit_vector[%d] = %3f\n", i, x_exit_vector[i]); printf("y_exit_vector[%d] = %3f\n", i, y_exit_vector[i]); printf("z_exit_vector[%d] = %3f\n", i, z_exit_vector[i]); printf("xy_entry_angle_vector[%d] = %3f\n", i, xy_entry_angle_vector[i]); printf("xz_entry_angle_vector[%d] = %3f\n", i, xz_entry_angle_vector[i]); printf("xy_exit_angle_vector[%d] = %3f\n", i, xy_exit_angle_vector[i]); printf("xz_exit_angle_vector[%d] = %3f\n", i, xz_exit_angle_vector[i]); printf("passed_cuts_h[%d] = %d\n", i, passed_cuts_h[i]); fgets(user_response, sizeof(user_response), stdin); } int start_position = 0; int post_cut_histories = 0; for( int iteration = 0; iteration < 6; iteration++ ) { printf("start iteration %d\n", iteration ); for( int i = 0; i < 5; i++ ) { if( passed_cuts_h[start_position + i] ) { printf("start i = %d\n", i ); printf("index = %d\n", start_position + i ); bin_num_vector[post_cut_histories] = bin_num_vector[start_position + i]; WEPL_vector[post_cut_histories] = WEPL_vector[start_position + i]; x_entry_vector[post_cut_histories] = x_entry_vector[start_position + i]; y_entry_vector[post_cut_histories] = y_entry_vector[start_position + i]; z_entry_vector[post_cut_histories] = z_entry_vector[start_position + i]; x_exit_vector[post_cut_histories] = x_exit_vector[start_position + i]; y_exit_vector[post_cut_histories] = y_exit_vector[start_position + i]; z_exit_vector[post_cut_histories] = z_exit_vector[start_position + i]; xy_entry_angle_vector[post_cut_histories] = xy_entry_angle_vector[start_position + i]; xz_entry_angle_vector[post_cut_histories] = xz_entry_angle_vector[start_position + i]; xy_exit_angle_vector[post_cut_histories] = xy_exit_angle_vector[start_position + i]; xz_exit_angle_vector[post_cut_histories] = 
xz_exit_angle_vector[start_position + i]; post_cut_histories++; printf("end i = %d\n", i ); } } start_position += 5; printf("end iteration %d\n", iteration ); } bin_num_vector.resize(post_cut_histories); WEPL_vector.resize(post_cut_histories); x_entry_vector.resize(post_cut_histories); y_entry_vector.resize(post_cut_histories); z_entry_vector.resize(post_cut_histories); x_exit_vector.resize(post_cut_histories); y_exit_vector.resize(post_cut_histories); z_exit_vector.resize(post_cut_histories); xy_entry_angle_vector.resize(post_cut_histories); xz_entry_angle_vector.resize(post_cut_histories); xy_exit_angle_vector.resize(post_cut_histories); xz_exit_angle_vector.resize(post_cut_histories); printf("post_cuts\n\n\n"); printf("post_cut_histories = %d\n\n", post_cut_histories); for( int i = 0; i < post_cut_histories; i++ ) { printf("bin_num_vector[%d] = %d\n", i, bin_num_vector[i]); printf("WEPL_vector[%d] = %3f\n", i, WEPL_vector[i]); printf("x_entry_vector[%d] = %3f\n", i, x_entry_vector[i]); printf("y_entry_vector[%d] = %3f\n", i, y_entry_vector[i]); printf("z_entry_vector[%d] = %3f\n", i, z_entry_vector[i]); printf("x_exit_vector[%d] = %3f\n", i, x_exit_vector[i]); printf("y_exit_vector[%d] = %3f\n", i, y_exit_vector[i]); printf("z_exit_vector[%d] = %3f\n", i, z_exit_vector[i]); printf("xy_entry_angle_vector[%d] = %3f\n", i, xy_entry_angle_vector[i]); printf("xz_entry_angle_vector[%d] = %3f\n", i, xz_entry_angle_vector[i]); printf("xy_exit_angle_vector[%d] = %3f\n", i, xy_exit_angle_vector[i]); printf("xz_exit_angle_vector[%d] = %3f\n", i, xz_exit_angle_vector[i]); printf("passed_cuts_h[%d] = %d\n", i, passed_cuts_h[i]); fgets(user_response, sizeof(user_response), stdin); } } __global__ void test_func_kernel( int* test_array, int vec_array_elements ) { for(int i = 0; i < vec_array_elements; i++ ) test_array[i] *= 2; }
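The voxel helpers in this file (calculate_x_voxel, calculate_y_voxel, calculate_slice) map a physical coordinate to an array index by shifting the coordinate into the reconstruction volume and dividing by the voxel size; the inline comments trace x = -10 on a 100-voxel axis of width 1 to voxel 40. A minimal host-side sketch of that arithmetic (the helper and main() below are illustrative, not part of the original file):

#include <cstdio>

// Same mapping as calculate_x_voxel: shift x into [0, x_width), then divide
// by the voxel width to get the column index.
static int calc_x_voxel(float x_position, int x_voxels, float voxel_width)
{
    float x_width = x_voxels * voxel_width;   // e.g. 100
    float x_range = x_width / 2;              // e.g. 50
    return (int)((x_position + x_range) / voxel_width);
}

int main()
{
    // Reproduces the worked example in the comments: (-10 + 50) / 1 = 40.
    printf("x = -10 -> voxel %d\n", calc_x_voxel(-10.0f, 100, 1.0f));
    return 0;
}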
6f634305996ef1b9191afe326a0a6020369ef1b8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void profileLevelZero_kernel() {}
6f634305996ef1b9191afe326a0a6020369ef1b8.cu
#include "includes.h"

__global__ void profileLevelZero_kernel() {}
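This pair shows the smallest hipify transformation in the set: the banner comment and the "hip/hip_runtime.h" include are prepended and the empty kernel is left untouched. A self-contained sketch of how such a kernel could be launched and waited on from CUDA host code (the main() below is hypothetical and appears in neither file):

#include <cstdio>

__global__ void profileLevelZero_kernel() {}

int main()
{
    profileLevelZero_kernel<<<1, 1>>>();   // asynchronous launch
    cudaDeviceSynchronize();               // wait for the (empty) kernel to finish
    printf("kernel completed\n");
    return 0;
}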
93e9a100de46ccbba0e1beaa172d4bd06e2d6588.hip
// !!! This is a file automatically generated by hipify!!! /* xor_mlp.cu XOR network implementation with general MLP CUDA code. Andrei de A. Formiga, 2012-06-19 */ #include <stdio.h> #include "mlpnnets.h" // constant for the RNG seed #define SEED 419217ULL //#define SEED 149317ULL //#define SEED 27ULL // maximum absolute value for random initialization of weights #define MAX_ABS 1.5f // inputs for all cases float inputs[] = { 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f }; const int ncases = 4; // neurons per layer int neuronsPerLayer[] = { 2, 2, 1 }; // array for expected outputs float expected[] = { 0.1f, 0.9f, 0.9f, 0.1f }; // array to store calculated outputs float *outputs; // the network MLPNetwork *xornn; // training dataset DataSet *trainData; // --- main ---------------------------------------------------------- int main(int argc, char **argv) { // create network xornn = CreateNetwork(3, neuronsPerLayer); if (xornn == NULL) { fprintf(stderr, "Error creating XOR network\n"); return -1; } // initialize weights printf("* Initializing weights\n"); RandomWeights(xornn, MAX_ABS, SEED); // print weights printf("* Random initial weights for network:\n# "); PrintWeights(xornn); // create dataset trainData = (DataSet*) malloc(sizeof(DataSet)); if (trainData == NULL) { fprintf(stderr, "Could not allocate memory for dataset structure\n"); return -1; } trainData->nCases = ncases; trainData->inputSize = 2; trainData->outputSize = 1; trainData->inputs = inputs; trainData->outputs = expected; trainData->location = LOC_HOST; // train the network int epochs = 6000; printf("* Training network by backpropagation with %d epochs\n", epochs); float sse; sse = BatchTrainBackprop(xornn, trainData, epochs, 0.75f, 1, 0); printf("* Final SSE after training: %7.9f\n", sse); // print weights after training printf("* Weights for network after training:\n# "); PrintWeights(xornn); // test trained networks with known inputs (assume outputs are already allocated) printf("* Calculating outputs for input cases\n"); PresentInputsFromDataSet(xornn, trainData, ACTF_SIGMOID); // // print outputs per layer (debug) // float *outs; // for (int i = 0; i < 3; ++i) { // printf("* Outputs for layer %d (off=%d, wPN=%d):\n", i, // xornn->layers[i]->weightOffset, xornn->layers[i]->weightsPerNeuron); // outs = GetLayerOutputs(xornn, i); // if (outs == NULL) // printf("! Couldn't get outputs for layer %d\n", i); // else { // for (int j = 0; j < xornn->layers[i]->nNeurons * xornn->nCases; ++j) { // printf("%5.3f ", outs[j]); // } // printf("\n"); // } // free(outs); // } hipDeviceSynchronize(); // copy outputs to host memory outputs = (float*) malloc(4 * sizeof(float)); CopyNetworkOutputs(xornn, outputs); // display results printf("* Results: \n"); for (int i = 0; i < ncases; ++i) { printf("- Output for case (%f, %f) = %f\n", inputs[i*2], inputs[i*2+1], outputs[i]); } free(outputs); DestroyNetwork(xornn); return 0; }
93e9a100de46ccbba0e1beaa172d4bd06e2d6588.cu
/* xor_mlp.cu XOR network implementation with general MLP CUDA code. Andrei de A. Formiga, 2012-06-19 */ #include <stdio.h> #include "mlpnnets.h" // constant for the RNG seed #define SEED 419217ULL //#define SEED 149317ULL //#define SEED 27ULL // maximum absolute value for random initialization of weights #define MAX_ABS 1.5f // inputs for all cases float inputs[] = { 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f }; const int ncases = 4; // neurons per layer int neuronsPerLayer[] = { 2, 2, 1 }; // array for expected outputs float expected[] = { 0.1f, 0.9f, 0.9f, 0.1f }; // array to store calculated outputs float *outputs; // the network MLPNetwork *xornn; // training dataset DataSet *trainData; // --- main ---------------------------------------------------------- int main(int argc, char **argv) { // create network xornn = CreateNetwork(3, neuronsPerLayer); if (xornn == NULL) { fprintf(stderr, "Error creating XOR network\n"); return -1; } // initialize weights printf("* Initializing weights\n"); RandomWeights(xornn, MAX_ABS, SEED); // print weights printf("* Random initial weights for network:\n# "); PrintWeights(xornn); // create dataset trainData = (DataSet*) malloc(sizeof(DataSet)); if (trainData == NULL) { fprintf(stderr, "Could not allocate memory for dataset structure\n"); return -1; } trainData->nCases = ncases; trainData->inputSize = 2; trainData->outputSize = 1; trainData->inputs = inputs; trainData->outputs = expected; trainData->location = LOC_HOST; // train the network int epochs = 6000; printf("* Training network by backpropagation with %d epochs\n", epochs); float sse; sse = BatchTrainBackprop(xornn, trainData, epochs, 0.75f, 1, 0); printf("* Final SSE after training: %7.9f\n", sse); // print weights after training printf("* Weights for network after training:\n# "); PrintWeights(xornn); // test trained networks with known inputs (assume outputs are already allocated) printf("* Calculating outputs for input cases\n"); PresentInputsFromDataSet(xornn, trainData, ACTF_SIGMOID); // // print outputs per layer (debug) // float *outs; // for (int i = 0; i < 3; ++i) { // printf("* Outputs for layer %d (off=%d, wPN=%d):\n", i, // xornn->layers[i]->weightOffset, xornn->layers[i]->weightsPerNeuron); // outs = GetLayerOutputs(xornn, i); // if (outs == NULL) // printf("! Couldn't get outputs for layer %d\n", i); // else { // for (int j = 0; j < xornn->layers[i]->nNeurons * xornn->nCases; ++j) { // printf("%5.3f ", outs[j]); // } // printf("\n"); // } // free(outs); // } cudaThreadSynchronize(); // copy outputs to host memory outputs = (float*) malloc(4 * sizeof(float)); CopyNetworkOutputs(xornn, outputs); // display results printf("* Results: \n"); for (int i = 0; i < ncases; ++i) { printf("- Output for case (%f, %f) = %f\n", inputs[i*2], inputs[i*2+1], outputs[i]); } free(outputs); DestroyNetwork(xornn); return 0; }
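The only body change hipify makes in this pair is replacing the long-deprecated cudaThreadSynchronize() with hipDeviceSynchronize(); the modern CUDA equivalent is cudaDeviceSynchronize(). A minimal sketch (independent of mlpnnets.h, which is not part of this record) of that synchronization point before copying results back to the host:

#include <cstdio>

__global__ void dummy_kernel(float *y) { y[0] = 0.9f; }

int main()
{
    float *d_y, h_y = 0.0f;
    cudaMalloc(&d_y, sizeof(float));
    dummy_kernel<<<1, 1>>>(d_y);
    cudaDeviceSynchronize();   // modern replacement for cudaThreadSynchronize()
    cudaMemcpy(&h_y, d_y, sizeof(float), cudaMemcpyDeviceToHost);
    printf("output = %f\n", h_y);
    cudaFree(d_y);
    return 0;
}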
1c57b8ee125fe9ce68f9a8b19e3ed67b9358a242.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * nullKernelAsync.cu * * Microbenchmark for throughput of asynchronous kernel launch. * * Build with: nvcc -I ../chLib <options> nullKernelAsync.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chTimer.h" __global__ void NullKernel() { } int main( int argc, char *argv[] ) { //const int cIterations = 1000000; const int cIterations = 1000000; printf( "Measuring synchronous launch time... " ); fflush( stdout ); chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( NullKernel), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); } chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "%.2f us\n", usPerLaunch ); } return 0; }
1c57b8ee125fe9ce68f9a8b19e3ed67b9358a242.cu
/* * * nullKernelAsync.cu * * Microbenchmark for throughput of asynchronous kernel launch. * * Build with: nvcc -I ../chLib <options> nullKernelAsync.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chTimer.h" __global__ void NullKernel() { } int main( int argc, char *argv[] ) { //const int cIterations = 1000000; const int cIterations = 1000000; printf( "Measuring synchronous launch time... " ); fflush( stdout ); chTimerTimestamp start, stop; chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { NullKernel<<<1,1>>>(); cudaThreadSynchronize(); } chTimerGetTime( &stop ); { double microseconds = 1e6*chTimerElapsedTime( &start, &stop ); double usPerLaunch = microseconds / (float) cIterations; printf( "%.2f us\n", usPerLaunch ); } return 0; }
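Besides the <<<...>>> to hipLaunchKernelGGL rewrite, this pair's loop synchronizes after every launch, so it times synchronous launches even though the file header describes an asynchronous launch microbenchmark. A sketch of the asynchronous variant (an assumption, not code from the benchmark), which queues all launches and synchronizes once:

#include <cstdio>

__global__ void NullKernel() {}

int main()
{
    const int cIterations = 100000;
    for (int i = 0; i < cIterations; i++) {
        NullKernel<<<1, 1>>>();   // asynchronous: returns as soon as the launch is queued
    }
    cudaDeviceSynchronize();      // wait once for the whole queue to drain
    printf("queued and completed %d launches\n", cIterations);
    return 0;
}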
878d58af09bbc9e2baed8c600fb69dece7f3cd46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_divf (size_t n, float *result, float *x, float *y) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = x[id] / y[id]; } }
878d58af09bbc9e2baed8c600fb69dece7f3cd46.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_divf (size_t n, float *result, float *x, float *y) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = x[id] / y[id]; } }
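Only the vec_divf kernel has a body in this record; the remaining extern "C" declarations carry comments but no definitions. A hypothetical host-side driver for vec_divf (JCudaVec launches these kernels from Java, so the main() below is an assumption) showing the usual ceil-divide grid sizing and why the bounds check matters when n is not a multiple of the block size:

#include <cstdio>

__global__ void vec_divf(size_t n, float *result, float *x, float *y)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < n) {
        result[id] = x[id] / y[id];
    }
}

int main()
{
    const size_t n = 1000;                            // not a multiple of the block size
    float *x, *y, *r;
    cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&y, n * sizeof(float));
    cudaMallocManaged(&r, n * sizeof(float));
    for (size_t i = 0; i < n; i++) { x[i] = (float)i; y[i] = 2.0f; }

    const int block = 256;
    const int grid  = (int)((n + block - 1) / block); // ceil(n / block) = 4 blocks
    vec_divf<<<grid, block>>>(n, r, x, y);
    cudaDeviceSynchronize();

    printf("r[10] = %f\n", r[10]);                    // 10 / 2 = 5.0
    cudaFree(x); cudaFree(y); cudaFree(r);
    return 0;
}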
cbab03164efced43f3ca5217754c8772548090ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <gtest/gtest.h> #include <unordered_map> #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" // This unit test is an example comparing the performance between using pinned // memory and not. In general, using pinned memory will be faster. template <typename T> __global__ void Kernel(T* output, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < dim) { output[tid] = output[tid] * output[tid] / 100; } } template <typename Place> float test_pinned_memory() { Place cpu_place; paddle::platform::CUDAPlace cuda_place; const int data_size = 4096; const int iteration = 10; // create event start and end hipEvent_t start_e, stop_e, copying_e; float elapsedTime = 0; hipEventCreate(&start_e); hipEventCreate(&stop_e); hipEventCreate(&copying_e); // create computation stream, data copying stream hipStream_t computation_stream, copying_stream; hipStreamCreate(&computation_stream); hipStreamCreate(&copying_stream); // create record event, pinned memory, gpu memory std::vector<hipEvent_t> record_event(iteration); std::vector<float*> input_pinned_mem(iteration); std::vector<float*> gpu_mem(iteration); std::vector<float*> output_pinned_mem(iteration); // initial data for (int j = 0; j < iteration; ++j) { hipEventCreateWithFlags(&record_event[j], hipEventDisableTiming); hipEventCreate(&(record_event[j])); input_pinned_mem[j] = static_cast<float*>( paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); output_pinned_mem[j] = static_cast<float*>( paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); gpu_mem[j] = static_cast<float*>( paddle::memory::Alloc(cuda_place, data_size * sizeof(float))); for (int k = 0; k < data_size; ++k) { input_pinned_mem[j][k] = k; } } hipEventRecord(start_e, computation_stream); // computation for (int m = 0; m < 30; ++m) { for (int i = 0; i < iteration; ++i) { // cpu -> GPU on computation stream. // note: this operation is async for pinned memory. paddle::memory::Copy(cuda_place, gpu_mem[i], cpu_place, input_pinned_mem[i], data_size * sizeof(float), computation_stream); // call kernel on computation stream. hipLaunchKernelGGL(( Kernel), dim3(4), dim3(1024), 0, computation_stream, gpu_mem[i], data_size); // record event_computation on computation stream hipEventRecord(record_event[i], computation_stream); // wait event_computation on copy stream. // note: this operation is async. hipStreamWaitEvent(copying_stream, record_event[i], 0); // copy data GPU->CPU, on copy stream. // note: this operation is async for pinned memory. 
paddle::memory::Copy(cpu_place, output_pinned_mem[i], cuda_place, gpu_mem[i], data_size * sizeof(float), copying_stream); } } hipEventRecord(copying_e, copying_stream); hipStreamWaitEvent(computation_stream, copying_e, 0); hipEventRecord(stop_e, computation_stream); hipEventSynchronize(start_e); hipEventSynchronize(stop_e); hipEventElapsedTime(&elapsedTime, start_e, stop_e); // std::cout << cpu_place << " " // << "time consume:" << elapsedTime / 30 << std::endl; for (int l = 0; l < iteration; ++l) { for (int k = 0; k < data_size; ++k) { float temp = input_pinned_mem[l][k]; temp = temp * temp / 100; EXPECT_FLOAT_EQ(temp, output_pinned_mem[l][k]); } } // destroy resource hipEventDestroy(copying_e); hipEventDestroy(start_e); hipEventDestroy(stop_e); for (int j = 0; j < 10; ++j) { hipEventDestroy((record_event[j])); paddle::memory::Free(cpu_place, input_pinned_mem[j]); paddle::memory::Free(cpu_place, output_pinned_mem[j]); paddle::memory::Free(cuda_place, gpu_mem[j]); } return elapsedTime / 30; } TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) { // Generally speaking, operation on pinned_memory is faster than that on // unpinned-memory, but if this unit test fails frequently, please close this // test for the time being. float time1 = test_pinned_memory<paddle::platform::CPUPlace>(); float time2 = test_pinned_memory<paddle::platform::CUDAPinnedPlace>(); EXPECT_GT(time1, time2); }
cbab03164efced43f3ca5217754c8772548090ab.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <gtest/gtest.h> #include <unordered_map> #include "paddle/fluid/memory/detail/memory_block.h" #include "paddle/fluid/memory/detail/meta_data.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/platform/cpu_info.h" #include "paddle/fluid/platform/gpu_info.h" #include "paddle/fluid/platform/place.h" // This unit test is an example comparing the performance between using pinned // memory and not. In general, using pinned memory will be faster. template <typename T> __global__ void Kernel(T* output, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < dim) { output[tid] = output[tid] * output[tid] / 100; } } template <typename Place> float test_pinned_memory() { Place cpu_place; paddle::platform::CUDAPlace cuda_place; const int data_size = 4096; const int iteration = 10; // create event start and end cudaEvent_t start_e, stop_e, copying_e; float elapsedTime = 0; cudaEventCreate(&start_e); cudaEventCreate(&stop_e); cudaEventCreate(&copying_e); // create computation stream, data copying stream cudaStream_t computation_stream, copying_stream; cudaStreamCreate(&computation_stream); cudaStreamCreate(&copying_stream); // create record event, pinned memory, gpu memory std::vector<cudaEvent_t> record_event(iteration); std::vector<float*> input_pinned_mem(iteration); std::vector<float*> gpu_mem(iteration); std::vector<float*> output_pinned_mem(iteration); // initial data for (int j = 0; j < iteration; ++j) { cudaEventCreateWithFlags(&record_event[j], cudaEventDisableTiming); cudaEventCreate(&(record_event[j])); input_pinned_mem[j] = static_cast<float*>( paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); output_pinned_mem[j] = static_cast<float*>( paddle::memory::Alloc(cpu_place, data_size * sizeof(float))); gpu_mem[j] = static_cast<float*>( paddle::memory::Alloc(cuda_place, data_size * sizeof(float))); for (int k = 0; k < data_size; ++k) { input_pinned_mem[j][k] = k; } } cudaEventRecord(start_e, computation_stream); // computation for (int m = 0; m < 30; ++m) { for (int i = 0; i < iteration; ++i) { // cpu -> GPU on computation stream. // note: this operation is async for pinned memory. paddle::memory::Copy(cuda_place, gpu_mem[i], cpu_place, input_pinned_mem[i], data_size * sizeof(float), computation_stream); // call kernel on computation stream. Kernel<<<4, 1024, 0, computation_stream>>>(gpu_mem[i], data_size); // record event_computation on computation stream cudaEventRecord(record_event[i], computation_stream); // wait event_computation on copy stream. // note: this operation is async. cudaStreamWaitEvent(copying_stream, record_event[i], 0); // copy data GPU->CPU, on copy stream. // note: this operation is async for pinned memory. 
paddle::memory::Copy(cpu_place, output_pinned_mem[i], cuda_place, gpu_mem[i], data_size * sizeof(float), copying_stream); } } cudaEventRecord(copying_e, copying_stream); cudaStreamWaitEvent(computation_stream, copying_e, 0); cudaEventRecord(stop_e, computation_stream); cudaEventSynchronize(start_e); cudaEventSynchronize(stop_e); cudaEventElapsedTime(&elapsedTime, start_e, stop_e); // std::cout << cpu_place << " " // << "time consume:" << elapsedTime / 30 << std::endl; for (int l = 0; l < iteration; ++l) { for (int k = 0; k < data_size; ++k) { float temp = input_pinned_mem[l][k]; temp = temp * temp / 100; EXPECT_FLOAT_EQ(temp, output_pinned_mem[l][k]); } } // destroy resource cudaEventDestroy(copying_e); cudaEventDestroy(start_e); cudaEventDestroy(stop_e); for (int j = 0; j < 10; ++j) { cudaEventDestroy((record_event[j])); paddle::memory::Free(cpu_place, input_pinned_mem[j]); paddle::memory::Free(cpu_place, output_pinned_mem[j]); paddle::memory::Free(cuda_place, gpu_mem[j]); } return elapsedTime / 30; } TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) { // Generally speaking, operation on pinned_memory is faster than that on // unpinned-memory, but if this unit test fails frequently, please close this // test for the time being. float time1 = test_pinned_memory<paddle::platform::CPUPlace>(); float time2 = test_pinned_memory<paddle::platform::CUDAPinnedPlace>(); EXPECT_GT(time1, time2); }
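The test above overlaps work by issuing the H2D copy and the kernel on a computation stream, recording an event, making the copy stream wait on that event, and then issuing the D2H copy on the copy stream, with pinned host memory keeping the copies asynchronous. A minimal sketch of that cross-stream dependency outside of Paddle's allocator (all names below are illustrative):

#include <cstdio>

__global__ void square(float *d, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d[i] = d[i] * d[i];
}

int main()
{
    const int n = 1024;
    float *h, *d;
    cudaMallocHost(&h, n * sizeof(float));     // pinned host memory -> copies stay async
    cudaMalloc(&d, n * sizeof(float));
    for (int i = 0; i < n; i++) h[i] = 2.0f;

    cudaStream_t compute, copy;
    cudaStreamCreate(&compute);
    cudaStreamCreate(&copy);
    cudaEvent_t done;
    cudaEventCreateWithFlags(&done, cudaEventDisableTiming);

    cudaMemcpyAsync(d, h, n * sizeof(float), cudaMemcpyHostToDevice, compute);
    square<<<(n + 255) / 256, 256, 0, compute>>>(d, n);
    cudaEventRecord(done, compute);            // mark the end of the compute work
    cudaStreamWaitEvent(copy, done, 0);        // copy stream waits for that point
    cudaMemcpyAsync(h, d, n * sizeof(float), cudaMemcpyDeviceToHost, copy);
    cudaStreamSynchronize(copy);

    printf("h[0] = %f\n", h[0]);               // expect 4.0
    cudaEventDestroy(done);
    cudaStreamDestroy(compute); cudaStreamDestroy(copy);
    cudaFreeHost(h); cudaFree(d);
    return 0;
}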
4b9a841c687b73df9ceda7a263932b164efd5a7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: P = M * N. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { //Multiply the two matrices int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0; if (row < P.height && col < P.width){ for (int k = 0; k < M.width; k++) { sum += M.elements[row*M.width + k] * N.elements[k*N.width + col]; } P.elements[row*P.width + col] = sum; } } #endif // #ifndef _MATRIXMUL_KERNEL_H_
4b9a841c687b73df9ceda7a263932b164efd5a7d.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: P = M * N. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { //Multiply the two matrices int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; float sum = 0; if (row < P.height && col < P.width){ for (int k = 0; k < M.width; k++) { sum += M.elements[row*M.width + k] * N.elements[k*N.width + col]; } P.elements[row*P.width + col] = sum; } } #endif // #ifndef _MATRIXMUL_KERNEL_H_
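matrixmul.h is not part of this record, so the Matrix struct in the sketch below is an assumption based on the fields the kernel dereferences (width, height, elements). Note that the kernel maps threadIdx.x to the row and threadIdx.y to the column, so the grid's x dimension has to cover P.height and its y dimension P.width:

#include <cstdio>

struct Matrix { int width; int height; float *elements; };   // assumed layout

__global__ void MatMulNaive(Matrix M, Matrix N, Matrix P)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;   // x indexes rows, as above
    int col = blockIdx.y * blockDim.y + threadIdx.y;   // y indexes columns
    if (row < P.height && col < P.width) {
        float sum = 0;
        for (int k = 0; k < M.width; k++)
            sum += M.elements[row * M.width + k] * N.elements[k * N.width + col];
        P.elements[row * P.width + col] = sum;
    }
}

int main()
{
    const int n = 64;                                   // square matrices for brevity
    size_t bytes = n * n * sizeof(float);
    Matrix M{n, n, nullptr}, N{n, n, nullptr}, P{n, n, nullptr};
    cudaMallocManaged(&M.elements, bytes);
    cudaMallocManaged(&N.elements, bytes);
    cudaMallocManaged(&P.elements, bytes);
    for (int i = 0; i < n * n; i++) { M.elements[i] = 1.0f; N.elements[i] = 2.0f; }

    dim3 block(16, 16);
    dim3 grid((P.height + block.x - 1) / block.x,       // rows along x
              (P.width  + block.y - 1) / block.y);      // cols along y
    MatMulNaive<<<grid, block>>>(M, N, P);
    cudaDeviceSynchronize();

    printf("P[0] = %f (expected %f)\n", P.elements[0], 2.0f * n);
    cudaFree(M.elements); cudaFree(N.elements); cudaFree(P.elements);
    return 0;
}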
365001c66cafe8aaceec404458437ef0a7c57e9c.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "Header.h" #define TX 27 // number of threads per block along x-axis #define TY 27 // number of threads per block along y-axis __global__ void greyscaleKernel(pixel* img, unsigned W, unsigned H, unsigned dim) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int r_avg = 0; unsigned int g_avg = 0; unsigned int b_avg = 0; int temp = (dim - 1) / 2; if ((((y - temp)*W + x - temp) < W*H) && (((y + temp)*W + x + temp) < W*H)) { for (int i = temp; i > -(temp+1); i--) { for (int j = -temp; j < (temp+1); j++) { r_avg += img[(y + i)*W + x + j].r; g_avg += img[(y + i)*W + x + j].g; b_avg += img[(y + i)*W + x + j].b; } } } img[y*W + x].r = r_avg/(dim*dim); img[y*W + x].g = g_avg/(dim*dim); img[y*W + x].b = b_avg/ (dim*dim); } // Helper function for using CUDA to add vectors in parallel. void cudaGrayscale(pixel* h_img, unsigned W, unsigned H, unsigned dim) { pixel* d_img; size_t bytes = W * H * sizeof(pixel); hipMalloc(&d_img, bytes); hipMemcpy(d_img, h_img, bytes, hipMemcpyHostToDevice); dim3 blockSize(TX, TY); int bx = (W + 26) / blockSize.x; int by = (H + 26) / blockSize.y; dim3 gridSize = dim3(bx, by); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( greyscaleKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_img, W, H ,dim); hipMemcpy(h_img, d_img, bytes, hipMemcpyDeviceToHost); hipFree(d_img); }
365001c66cafe8aaceec404458437ef0a7c57e9c.cu
#include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "Header.h" #define TX 27 // number of threads per block along x-axis #define TY 27 // number of threads per block along y-axis __global__ void greyscaleKernel(pixel* img, unsigned W, unsigned H, unsigned dim) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int r_avg = 0; unsigned int g_avg = 0; unsigned int b_avg = 0; int temp = (dim - 1) / 2; if ((((y - temp)*W + x - temp) < W*H) && (((y + temp)*W + x + temp) < W*H)) { for (int i = temp; i > -(temp+1); i--) { for (int j = -temp; j < (temp+1); j++) { r_avg += img[(y + i)*W + x + j].r; g_avg += img[(y + i)*W + x + j].g; b_avg += img[(y + i)*W + x + j].b; } } } img[y*W + x].r = r_avg/(dim*dim); img[y*W + x].g = g_avg/(dim*dim); img[y*W + x].b = b_avg/ (dim*dim); } // Helper function for using CUDA to add vectors in parallel. void cudaGrayscale(pixel* h_img, unsigned W, unsigned H, unsigned dim) { pixel* d_img; size_t bytes = W * H * sizeof(pixel); cudaMalloc(&d_img, bytes); cudaMemcpy(d_img, h_img, bytes, cudaMemcpyHostToDevice); dim3 blockSize(TX, TY); int bx = (W + 26) / blockSize.x; int by = (H + 26) / blockSize.y; dim3 gridSize = dim3(bx, by); // Launch a kernel on the GPU with one thread for each element. greyscaleKernel<<<gridSize, blockSize>>>(d_img, W, H ,dim); cudaMemcpy(h_img, d_img, bytes, cudaMemcpyDeviceToHost); cudaFree(d_img); }
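Despite its name, greyscaleKernel computes a dim x dim box average of each colour channel, and the launch sizes its grid with (W + 26) / 27, i.e. ceil(W / TX) written with the literal TX - 1 = 26. A small sketch of the general ceil-divide form (a generic helper, not something defined in Header.h):

#include <cstdio>

static unsigned div_up(unsigned n, unsigned block)
{
    return (n + block - 1) / block;   // smallest number of blocks covering n elements
}

int main()
{
    const unsigned TX = 27, TY = 27, W = 1920, H = 1080;
    printf("grid = %u x %u blocks of %u x %u threads\n",
           div_up(W, TX), div_up(H, TY), TX, TY);
    return 0;
}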
82b601d05cc86b3318231371a43c002643a6de02.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License./ #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_vector_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(LogSigmoid, LAYER_LOGSIGMOID); __global__ void log_sigmoid_kernel(int count, const float* input, float* output) { CUDA_KERNEL_LOOP(index, count) { output[index] = __logf(1.0f / (1.0f + __expf(-input[index]))); } } Status CudaLogSigmoidLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return CudaLayerAcc::Init(context, param, resource, inputs, outputs); } Status CudaLogSigmoidLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return TNN_OK; } Status CudaLogSigmoidLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; int count = DimsVectorUtils::Count(output_blob->GetBlobDesc().dims); if (output_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) { float *input_data = static_cast<float *>(input_blob->GetHandle().base); float *output_data = static_cast<float *>(output_blob->GetHandle().base); hipLaunchKernelGGL(( log_sigmoid_kernel), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), count, input_data, output_data); } else { return Status(TNNERR_LAYER_ERR, "datatype not support"); } return TNN_OK; } REGISTER_CUDA_ACC(LogSigmoid, LAYER_LOGSIGMOID); } // namespace TNN_NS
82b601d05cc86b3318231371a43c002643a6de02.cu
// Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License./ #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_vector_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(LogSigmoid, LAYER_LOGSIGMOID); __global__ void log_sigmoid_kernel(int count, const float* input, float* output) { CUDA_KERNEL_LOOP(index, count) { output[index] = __logf(1.0f / (1.0f + __expf(-input[index]))); } } Status CudaLogSigmoidLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return CudaLayerAcc::Init(context, param, resource, inputs, outputs); } Status CudaLogSigmoidLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return TNN_OK; } Status CudaLogSigmoidLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; int count = DimsVectorUtils::Count(output_blob->GetBlobDesc().dims); if (output_blob->GetBlobDesc().data_type == DATA_TYPE_FLOAT) { float *input_data = static_cast<float *>(input_blob->GetHandle().base); float *output_data = static_cast<float *>(output_blob->GetHandle().base); log_sigmoid_kernel<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>(count, input_data, output_data); } else { return Status(TNNERR_LAYER_ERR, "datatype not support"); } return TNN_OK; } REGISTER_CUDA_ACC(LogSigmoid, LAYER_LOGSIGMOID); } // namespace TNN_NS
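The kernel evaluates log(1 / (1 + exp(-x))) directly with the fast intrinsics. Algebraically this equals -log(1 + exp(-x)), so an equivalent formulation could use log1pf; the sketch below records that observation and is not how TNN defines the layer (it also drops the CUDA_KERNEL_LOOP grid-stride macro in favour of a plain bounds check):

__global__ void log_sigmoid_alt(int count, const float* input, float* output)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < count) {
        // log(sigmoid(x)) = -log(1 + exp(-x))
        output[index] = -log1pf(expf(-input[index]));
    }
}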
a09260ffcb3adc5daba30aa0c17f7311971bb03a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <iostream> #include <stdexcept> #include "hist_processor.hpp" using namespace thrust; using namespace structured; using namespace std; int div_ceil(int numerator, int denominator) { std::div_t res = std::div(numerator, denominator); return res.rem ? (res.quot + 1) : res.quot; } template<typename T, bool is32Multiple, int nCh, int off = 1> __global__ void calcHist(T * out, T const (*in)[nCh], unsigned dim0, KernelArray<GaussianKernel> histKnl, float variance) { unsigned gx = TILE_DIM * blockIdx.x + threadIdx.x; unsigned gy = TILE_DIM * blockIdx.y + threadIdx.y; unsigned dim1 = histKnl.size(); #pragma unroll for (unsigned repeat = 0; repeat < TILE_DIM; repeat += blockDim.y) { unsigned gy_ = gy+repeat; if (is32Multiple || (gx<dim0 && gy_<dim1)) out[gy_ * dim0 + gx] = histKnl[gy_].dist<T, nCh, off> (in[gx], variance); } } template<typename T, bool is32Multiple, int nCh, int off = 1> __global__ void calcGrad(T (*out)[nCh], T const (*in)[nCh], const T * grad, unsigned dim0, KernelArray<GaussianKernel> histKnl, float variance) { unsigned gx = TILE_DIM * blockIdx.x + threadIdx.x; unsigned gy = TILE_DIM * blockIdx.y + threadIdx.y; unsigned dim1 = histKnl.size(); __shared__ T smem[TILE_DIM][TILE_DIM+1][2]; #pragma unroll for (unsigned repeat = 0; repeat < TILE_DIM; repeat += blockDim.y) { unsigned gy_ = gy+repeat; if (is32Multiple || (gx<dim0 && gy_<dim1)) histKnl[gy_].grad<T, nCh, off>( smem[threadIdx.x][threadIdx.y], in[gx], grad[gy_ * dim0 + gx], variance ); } __syncthreads(); if ( threadIdx.y < 2 ) { gy = TILE_DIM * blockIdx.y; T sum = 0; #pragma unroll for (unsigned repeat = 0; repeat < TILE_DIM; repeat++) { unsigned gy_ = gy+repeat; if (is32Multiple || (gx<dim0 && gy_<dim1)) sum += smem[threadIdx.x][repeat][threadIdx.y]; else break; } atomicAdd(out[gx] + off + threadIdx.y, sum); } } template <typename T> HistProc<T>::HistProc(const host_vector<GaussianKernel>& histKnl, float var) : histKnl(histKnl), variance(var) { } template <typename T> HistProc<T>::~HistProc() { } template <typename T> template <int nCh> void HistProc<T>::calcHistMat(T *dstdata, T const (*imgdata)[nCh], int npixels, int batch_size) { dim3 threadsPerBlock(TILE_DIM, BLOCK_ROWS, 1); dim3 numBlocks( div_ceil(npixels, TILE_DIM), div_ceil(histKnl.size(), TILE_DIM), 1); for(int batch=0; batch<batch_size; batch++) { hipLaunchKernelGGL(( calcHist<T, false, nCh, nCh-2>), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dstdata, imgdata, npixels, histKnl, variance); imgdata += npixels; dstdata += histKnl.size() * npixels; } } template <typename T> template <int nCh> void HistProc<T>::calcHistGrad(T (*dstdata)[nCh], T const (*imgdata)[nCh], const T *graddata, int npixels, int batch_size){ dim3 threadsPerBlock(TILE_DIM, BLOCK_ROWS, 1); dim3 numBlocks( div_ceil(npixels, TILE_DIM), div_ceil(histKnl.size(), TILE_DIM), 1); for(int batch=0; batch<batch_size; batch++) { hipMemset(dstdata, 0, sizeof(*dstdata)*npixels); hipLaunchKernelGGL(( calcGrad<T, false, nCh, nCh-2>), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dstdata, imgdata, graddata, npixels, histKnl, variance); dstdata += npixels; imgdata += npixels; graddata += histKnl.size() * npixels; } } template <typename T> void HistProc<T>::calcHistMat(const BufferedData& img, BufferedData& dst) { int batch_size = img.dim_size(0); int npixels = img.dim_size(1) * img.dim_size(2); int channel = img.dim_size(3); cerr<<"HistMat: npixels: "<<npixels<<", channel: "<<channel<<endl; if 
(channel == 2) { calcHistMat(dst.flat<T>(), img.flat<T[2]>(), npixels, batch_size); } else if (channel == 3){ calcHistMat(dst.flat<T>(), img.flat<T[3]>(), npixels, batch_size); } else { throw invalid_argument("channel not support"); } } template <typename T> void HistProc<T>::calcHistGrad(const structured::BufferedData& img, const structured::BufferedData& grad, structured::BufferedData& dst){ int batch_size = img.dim_size(0); int npixels = img.dim_size(1) * img.dim_size(2); int channel = img.dim_size(3); cerr<<"HistGrad: npixels: "<<npixels<<", channel: "<<channel<<endl; if (channel == 2) { calcHistGrad(dst.flat<T[2]>(), img.flat<T[2]>(), grad.flat<T>(), npixels, batch_size); } else if (channel == 3) { calcHistGrad(dst.flat<T[3]>(), img.flat<T[3]>(), grad.flat<T>(), npixels, batch_size); } else { throw invalid_argument("channel not support"); } } template struct HistProc<float>; template struct HistProc<double>;
a09260ffcb3adc5daba30aa0c17f7311971bb03a.cu
#include <cstdlib> #include <iostream> #include <stdexcept> #include "hist_processor.hpp" using namespace thrust; using namespace structured; using namespace std; int div_ceil(int numerator, int denominator) { std::div_t res = std::div(numerator, denominator); return res.rem ? (res.quot + 1) : res.quot; } template<typename T, bool is32Multiple, int nCh, int off = 1> __global__ void calcHist(T * out, T const (*in)[nCh], unsigned dim0, KernelArray<GaussianKernel> histKnl, float variance) { unsigned gx = TILE_DIM * blockIdx.x + threadIdx.x; unsigned gy = TILE_DIM * blockIdx.y + threadIdx.y; unsigned dim1 = histKnl.size(); #pragma unroll for (unsigned repeat = 0; repeat < TILE_DIM; repeat += blockDim.y) { unsigned gy_ = gy+repeat; if (is32Multiple || (gx<dim0 && gy_<dim1)) out[gy_ * dim0 + gx] = histKnl[gy_].dist<T, nCh, off> (in[gx], variance); } } template<typename T, bool is32Multiple, int nCh, int off = 1> __global__ void calcGrad(T (*out)[nCh], T const (*in)[nCh], const T * grad, unsigned dim0, KernelArray<GaussianKernel> histKnl, float variance) { unsigned gx = TILE_DIM * blockIdx.x + threadIdx.x; unsigned gy = TILE_DIM * blockIdx.y + threadIdx.y; unsigned dim1 = histKnl.size(); __shared__ T smem[TILE_DIM][TILE_DIM+1][2]; #pragma unroll for (unsigned repeat = 0; repeat < TILE_DIM; repeat += blockDim.y) { unsigned gy_ = gy+repeat; if (is32Multiple || (gx<dim0 && gy_<dim1)) histKnl[gy_].grad<T, nCh, off>( smem[threadIdx.x][threadIdx.y], in[gx], grad[gy_ * dim0 + gx], variance ); } __syncthreads(); if ( threadIdx.y < 2 ) { gy = TILE_DIM * blockIdx.y; T sum = 0; #pragma unroll for (unsigned repeat = 0; repeat < TILE_DIM; repeat++) { unsigned gy_ = gy+repeat; if (is32Multiple || (gx<dim0 && gy_<dim1)) sum += smem[threadIdx.x][repeat][threadIdx.y]; else break; } atomicAdd(out[gx] + off + threadIdx.y, sum); } } template <typename T> HistProc<T>::HistProc(const host_vector<GaussianKernel>& histKnl, float var) : histKnl(histKnl), variance(var) { } template <typename T> HistProc<T>::~HistProc() { } template <typename T> template <int nCh> void HistProc<T>::calcHistMat(T *dstdata, T const (*imgdata)[nCh], int npixels, int batch_size) { dim3 threadsPerBlock(TILE_DIM, BLOCK_ROWS, 1); dim3 numBlocks( div_ceil(npixels, TILE_DIM), div_ceil(histKnl.size(), TILE_DIM), 1); for(int batch=0; batch<batch_size; batch++) { calcHist<T, false, nCh, nCh-2><<<numBlocks, threadsPerBlock>>> (dstdata, imgdata, npixels, histKnl, variance); imgdata += npixels; dstdata += histKnl.size() * npixels; } } template <typename T> template <int nCh> void HistProc<T>::calcHistGrad(T (*dstdata)[nCh], T const (*imgdata)[nCh], const T *graddata, int npixels, int batch_size){ dim3 threadsPerBlock(TILE_DIM, BLOCK_ROWS, 1); dim3 numBlocks( div_ceil(npixels, TILE_DIM), div_ceil(histKnl.size(), TILE_DIM), 1); for(int batch=0; batch<batch_size; batch++) { cudaMemset(dstdata, 0, sizeof(*dstdata)*npixels); calcGrad<T, false, nCh, nCh-2><<<numBlocks, threadsPerBlock>>> (dstdata, imgdata, graddata, npixels, histKnl, variance); dstdata += npixels; imgdata += npixels; graddata += histKnl.size() * npixels; } } template <typename T> void HistProc<T>::calcHistMat(const BufferedData& img, BufferedData& dst) { int batch_size = img.dim_size(0); int npixels = img.dim_size(1) * img.dim_size(2); int channel = img.dim_size(3); cerr<<"HistMat: npixels: "<<npixels<<", channel: "<<channel<<endl; if (channel == 2) { calcHistMat(dst.flat<T>(), img.flat<T[2]>(), npixels, batch_size); } else if (channel == 3){ calcHistMat(dst.flat<T>(), img.flat<T[3]>(), 
npixels, batch_size); } else { throw invalid_argument("channel not support"); } } template <typename T> void HistProc<T>::calcHistGrad(const structured::BufferedData& img, const structured::BufferedData& grad, structured::BufferedData& dst){ int batch_size = img.dim_size(0); int npixels = img.dim_size(1) * img.dim_size(2); int channel = img.dim_size(3); cerr<<"HistGrad: npixels: "<<npixels<<", channel: "<<channel<<endl; if (channel == 2) { calcHistGrad(dst.flat<T[2]>(), img.flat<T[2]>(), grad.flat<T>(), npixels, batch_size); } else if (channel == 3) { calcHistGrad(dst.flat<T[3]>(), img.flat<T[3]>(), grad.flat<T>(), npixels, batch_size); } else { throw invalid_argument("channel not support"); } } template struct HistProc<float>; template struct HistProc<double>;
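calcGrad above has each thread write its contribution into a TILE_DIM x TILE_DIM shared-memory tile, then a few threads per block sum the tile and atomicAdd the partial sums into the global output. A stripped-down sketch of that block-local reduce-then-atomicAdd pattern, independent of GaussianKernel (which lives in hist_processor.hpp):

#include <cstdio>
#define TILE 32

__global__ void tile_sum(const float *in, float *out, int n)
{
    __shared__ float smem[TILE];
    int g = blockIdx.x * TILE + threadIdx.x;
    smem[threadIdx.x] = (g < n) ? in[g] : 0.0f;   // stage one tile in shared memory
    __syncthreads();
    if (threadIdx.x == 0) {
        float sum = 0.0f;
        for (int i = 0; i < TILE; i++) sum += smem[i];
        atomicAdd(out, sum);                      // accumulate the block's partial sum
    }
}

int main()
{
    const int n = 1000;
    float *in, *out;
    cudaMallocManaged(&in, n * sizeof(float));
    cudaMallocManaged(&out, sizeof(float));
    for (int i = 0; i < n; i++) in[i] = 1.0f;
    *out = 0.0f;
    tile_sum<<<(n + TILE - 1) / TILE, TILE>>>(in, out, n);
    cudaDeviceSynchronize();
    printf("sum = %f (expected %d)\n", *out, n);
    cudaFree(in); cudaFree(out);
    return 0;
}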
0d8dd587488696247928853abd515991339d1775.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zjacobisetup.cu normal z -> s, Tue Sep 2 12:38:33 2014 @author Hartwig Anzt */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void svjacobisetup_gpu( int num_rows, float *b, float *d, float *c, float *x){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ c[row] = b[row] / d[row]; x[row] = c[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param num_rows magma_int_t number of rows @param b magma_s_vector RHS b @param d magma_s_vector vector with diagonal entries @param c magma_s_vector* c = D^(-1) * b @param x magma_s_vector* iteration vector @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sjacobisetup_vector_gpu( int num_rows, float *b, float *d, float *c, float *x ){ dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( svjacobisetup_gpu), dim3(grid), dim3(BLOCK_SIZE), 0 , 0, num_rows, b, d, c, x ); return MAGMA_SUCCESS; } __global__ void sjacobidiagscal_kernel( int num_rows, float *b, float *d, float *c){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ c[row] = b[row] * d[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param num_rows magma_int_t number of rows @param b magma_s_vector RHS b @param d magma_s_vector vector with diagonal entries @param c magma_s_vector* c = D^(-1) * b @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sjacobi_diagscal( int num_rows, float *b, float *d, float *c){ dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( sjacobidiagscal_kernel), dim3(grid), dim3(BLOCK_SIZE), 0 , 0, num_rows, b, d, c ); return MAGMA_SUCCESS; }
0d8dd587488696247928853abd515991339d1775.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zjacobisetup.cu normal z -> s, Tue Sep 2 12:38:33 2014 @author Hartwig Anzt */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void svjacobisetup_gpu( int num_rows, float *b, float *d, float *c, float *x){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ c[row] = b[row] / d[row]; x[row] = c[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param num_rows magma_int_t number of rows @param b magma_s_vector RHS b @param d magma_s_vector vector with diagonal entries @param c magma_s_vector* c = D^(-1) * b @param x magma_s_vector* iteration vector @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sjacobisetup_vector_gpu( int num_rows, float *b, float *d, float *c, float *x ){ dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); svjacobisetup_gpu<<< grid, BLOCK_SIZE, 0 >>>( num_rows, b, d, c, x ); return MAGMA_SUCCESS; } __global__ void sjacobidiagscal_kernel( int num_rows, float *b, float *d, float *c){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ c[row] = b[row] * d[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param num_rows magma_int_t number of rows @param b magma_s_vector RHS b @param d magma_s_vector vector with diagonal entries @param c magma_s_vector* c = D^(-1) * b @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sjacobi_diagscal( int num_rows, float *b, float *d, float *c){ dim3 grid( (num_rows+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); sjacobidiagscal_kernel<<< grid, BLOCK_SIZE, 0 >>>( num_rows, b, d, c ); return MAGMA_SUCCESS; }
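Both routines above are element-wise: the setup kernel writes c[i] = b[i] / d[i] and x[i] = c[i], and the diagonal-scaling kernel writes c[i] = b[i] * d[i]. A minimal host-side sketch of calling the vector setup follows; the buffer names, the plain cudaMalloc/cudaMemcpy calls, and the host arrays h_b/h_d are illustrative assumptions (production MAGMA code would normally go through MAGMA's own allocation and vector wrappers).

// Sketch only: n, h_b and h_d are assumed; after the call, d_c holds D^{-1} * b and d_x starts from it.
float *d_b, *d_d, *d_c, *d_x;
cudaMalloc(&d_b, n * sizeof(float));
cudaMalloc(&d_d, n * sizeof(float));
cudaMalloc(&d_c, n * sizeof(float));
cudaMalloc(&d_x, n * sizeof(float));
cudaMemcpy(d_b, h_b, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_d, h_d, n * sizeof(float), cudaMemcpyHostToDevice);
magma_sjacobisetup_vector_gpu(n, d_b, d_d, d_c, d_x);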
5b12e95ca2f2727b64dd6178aedb1f4a22b9c8b7.hip
// !!! This is a file automatically generated by hipify!!! #include <hipfft.h> #include <proj3.cuh> CudaBuffer::CudaBuffer(int n): n(n) { hipMalloc((void**)&data, sizeof(float)*n); } CudaBuffer::~CudaBuffer() { hipFree(data); }
5b12e95ca2f2727b64dd6178aedb1f4a22b9c8b7.cu
#include <cufft.h> #include <proj3.cuh> CudaBuffer::CudaBuffer(int n): n(n) { cudaMalloc((void**)&data, sizeof(float)*n); } CudaBuffer::~CudaBuffer() { cudaFree(data); }
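CudaBuffer is a minimal RAII wrapper: the constructor allocates n floats on the device and the destructor frees them (neither the cudaMalloc nor the cudaFree status is checked). A short usage sketch follows; fill_kernel is a hypothetical kernel, and the sketch assumes the data and n members declared in proj3.cuh (not shown) are accessible.

// Sketch only: fill_kernel is not part of proj3.cuh.
__global__ void fill_kernel(float* data, int n, float value) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = value;
}

void fill_example(int n) {
    CudaBuffer buf(n);                                         // device allocation happens here
    fill_kernel<<<(n + 255) / 256, 256>>>(buf.data, n, 1.0f);  // 256 threads per block, ceil-div grid
    cudaDeviceSynchronize();
}                                                              // buf's destructor releases the memory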
ca4131246f78982b22c216bd466aaf97b6b7c539.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdio> #include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include <hip/hip_runtime.h> #include "helper_cuda.h" #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/opencv.hpp" #include "opencv2/gpu/gpu.hpp" #include <stdio.h> #include <ctime> #include <sys/time.h> using namespace std; using namespace cv; const int BLOCKDIM = 32; const int MAX_WINDOW = 11; __device__ const int FILTER_SIZE = 9; __device__ const int FILTER_HALFSIZE = FILTER_SIZE >> 1; __device__ void sort_bubble(float *x, int n_size) { for (int i = 0; i < n_size - 1; i++) { for(int j = 0; j < n_size - i - 1; j++) { if (x[j] > x[j+1]) { float temp = x[j]; x[j] = x[j+1]; x[j+1] = temp; } } } } __device__ int index(int x, int y, int width) { return (y * width) + x; } __device__ int clamp(int value, int bound) { if (value < 0) { return 1; } if (value < bound) { return value; } return bound - 1; } __global__ void median_filter_2d(unsigned char* input, unsigned char* output, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if((x<width) && (y<height)) { const int color_tid = index(x,y,width); float windowMedian[MAX_WINDOW*MAX_WINDOW]; int windowElements = 0; for (int x_iter = x - FILTER_HALFSIZE; x_iter <= x + FILTER_HALFSIZE; x_iter ++) { for (int y_iter = y - FILTER_HALFSIZE; y_iter <= y + FILTER_HALFSIZE; y_iter++) { if (0<=x_iter && x_iter < width && 0 <= y_iter && y_iter < height) { windowMedian[windowElements++] = input[index(x_iter,y_iter,width)]; } } } sort_bubble(windowMedian,windowElements); output[color_tid] = windowMedian[windowElements/2]; } } __global__ void median_filter_2d_sm(unsigned char* input, unsigned char* output, int width, int height) { __shared__ int sharedPixels[BLOCKDIM + FILTER_SIZE][BLOCKDIM + FILTER_SIZE]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; int xBlockLimit_max = blockDim.x - FILTER_HALFSIZE - 1; int yBlockLimit_max = blockDim.y - FILTER_HALFSIZE - 1; int xBlockLimit_min = FILTER_HALFSIZE; int yBlockLimit_min = FILTER_HALFSIZE; if (threadIdx.x > xBlockLimit_max && threadIdx.y > yBlockLimit_max) { int i = index(clamp(x + FILTER_HALFSIZE,width), clamp(y + FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + 2*FILTER_HALFSIZE][threadIdx.y + 2*FILTER_HALFSIZE] = pixel; } if (threadIdx.x > xBlockLimit_max && threadIdx.y < yBlockLimit_min) { int i = index(clamp(x + FILTER_HALFSIZE,width), clamp(y - FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + 2*FILTER_HALFSIZE][threadIdx.y] = pixel; } if (threadIdx.x < xBlockLimit_min && threadIdx.y > yBlockLimit_max) { int i = index(clamp(x - FILTER_HALFSIZE,width), clamp(y + FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x][threadIdx.y + 2*FILTER_HALFSIZE] = pixel; } if (threadIdx.x < xBlockLimit_min && threadIdx.y < yBlockLimit_min) { int i = index(clamp(x - FILTER_HALFSIZE,width), clamp(y - FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x][threadIdx.y] = pixel; } if (threadIdx.x < xBlockLimit_min) { int i = index(clamp(x - FILTER_HALFSIZE,width), clamp(y,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x][threadIdx.y + FILTER_HALFSIZE] = pixel; } if (threadIdx.x > xBlockLimit_max) { int i 
= index(clamp(x + FILTER_HALFSIZE,width), clamp(y,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + 2*FILTER_HALFSIZE][threadIdx.y + FILTER_HALFSIZE] = pixel; } if (threadIdx.y < yBlockLimit_min) { int i = index(clamp(x,width), clamp(y - FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + FILTER_HALFSIZE][threadIdx.y] = pixel; } if (threadIdx.y > yBlockLimit_max) { int i = index(clamp(x,width), clamp(y + FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + FILTER_HALFSIZE][threadIdx.y + 2*FILTER_HALFSIZE] = pixel; } int i = index(x, y, width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + FILTER_HALFSIZE][threadIdx.y + FILTER_HALFSIZE] = pixel; __syncthreads(); if((x<width) && (y<height)) { const int color_tid = y * width + x; float windowMedian[MAX_WINDOW*MAX_WINDOW]; int windowElements = 0; for (int x_iter = 0; x_iter < FILTER_SIZE; x_iter ++) { for (int y_iter = 0; y_iter < FILTER_SIZE; y_iter++) { if (0<=x_iter && x_iter < width && 0 <= y_iter && y_iter < height) { windowMedian[windowElements++] = sharedPixels[threadIdx.x + x_iter][threadIdx.y + y_iter]; } } } sort_bubble(windowMedian,windowElements); output[color_tid] = windowMedian[windowElements/2]; } } void median_filter_wrapper(const cv::Mat& input, cv::Mat& output) { unsigned char *d_input, *d_output; hipError_t cudaStatus; cudaStatus = hipMalloc<unsigned char>(&d_input,input.rows*input.cols); checkCudaErrors(cudaStatus); cudaStatus = hipMalloc<unsigned char>(&d_output,output.rows*output.cols); checkCudaErrors(cudaStatus); cudaStatus = hipMemcpy(d_input,input.ptr(),input.rows*input.cols,hipMemcpyHostToDevice); checkCudaErrors(cudaStatus); const dim3 block(BLOCKDIM,BLOCKDIM); const dim3 grid(input.cols/BLOCKDIM, input.rows/BLOCKDIM); hipLaunchKernelGGL(( median_filter_2d), dim3(grid),dim3(block), 0, 0, d_input,d_output,input.cols,input.rows); cudaStatus = hipDeviceSynchronize(); checkCudaErrors(cudaStatus); cudaStatus = hipMemcpy(output.ptr(),d_output,output.rows*output.cols,hipMemcpyDeviceToHost); checkCudaErrors(cudaStatus); cudaStatus = hipFree(d_input); checkCudaErrors(cudaStatus); cudaStatus = hipFree(d_output); checkCudaErrors(cudaStatus); } int main(int argc, char* argv[]) { if (argc < 2) { cout << " Usage: program image.format [output.format]" << endl; return -1; } string imagePath = argv[1]; Mat input = imread(imagePath,0); if(input.empty()) { cout<<"Could not load image. Check location and try again."<<endl; cin.get(); return -1; } Mat output_gpu(input.rows,input.cols,CV_8UC1); median_filter_wrapper(input,output_gpu); string outputPath = imagePath; if (argc > 2) { outputPath = argv[2]; } imwrite(outputPath,output_gpu); return 0; }
ca4131246f78982b22c216bd466aaf97b6b7c539.cu
#include <iostream> #include <cstdio> #include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include <cuda_runtime.h> #include "helper_cuda.h" #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/opencv.hpp" #include "opencv2/gpu/gpu.hpp" #include <stdio.h> #include <ctime> #include <sys/time.h> using namespace std; using namespace cv; const int BLOCKDIM = 32; const int MAX_WINDOW = 11; __device__ const int FILTER_SIZE = 9; __device__ const int FILTER_HALFSIZE = FILTER_SIZE >> 1; __device__ void sort_bubble(float *x, int n_size) { for (int i = 0; i < n_size - 1; i++) { for(int j = 0; j < n_size - i - 1; j++) { if (x[j] > x[j+1]) { float temp = x[j]; x[j] = x[j+1]; x[j+1] = temp; } } } } __device__ int index(int x, int y, int width) { return (y * width) + x; } __device__ int clamp(int value, int bound) { if (value < 0) { return 1; } if (value < bound) { return value; } return bound - 1; } __global__ void median_filter_2d(unsigned char* input, unsigned char* output, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if((x<width) && (y<height)) { const int color_tid = index(x,y,width); float windowMedian[MAX_WINDOW*MAX_WINDOW]; int windowElements = 0; for (int x_iter = x - FILTER_HALFSIZE; x_iter <= x + FILTER_HALFSIZE; x_iter ++) { for (int y_iter = y - FILTER_HALFSIZE; y_iter <= y + FILTER_HALFSIZE; y_iter++) { if (0<=x_iter && x_iter < width && 0 <= y_iter && y_iter < height) { windowMedian[windowElements++] = input[index(x_iter,y_iter,width)]; } } } sort_bubble(windowMedian,windowElements); output[color_tid] = windowMedian[windowElements/2]; } } __global__ void median_filter_2d_sm(unsigned char* input, unsigned char* output, int width, int height) { __shared__ int sharedPixels[BLOCKDIM + FILTER_SIZE][BLOCKDIM + FILTER_SIZE]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; int xBlockLimit_max = blockDim.x - FILTER_HALFSIZE - 1; int yBlockLimit_max = blockDim.y - FILTER_HALFSIZE - 1; int xBlockLimit_min = FILTER_HALFSIZE; int yBlockLimit_min = FILTER_HALFSIZE; if (threadIdx.x > xBlockLimit_max && threadIdx.y > yBlockLimit_max) { int i = index(clamp(x + FILTER_HALFSIZE,width), clamp(y + FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + 2*FILTER_HALFSIZE][threadIdx.y + 2*FILTER_HALFSIZE] = pixel; } if (threadIdx.x > xBlockLimit_max && threadIdx.y < yBlockLimit_min) { int i = index(clamp(x + FILTER_HALFSIZE,width), clamp(y - FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + 2*FILTER_HALFSIZE][threadIdx.y] = pixel; } if (threadIdx.x < xBlockLimit_min && threadIdx.y > yBlockLimit_max) { int i = index(clamp(x - FILTER_HALFSIZE,width), clamp(y + FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x][threadIdx.y + 2*FILTER_HALFSIZE] = pixel; } if (threadIdx.x < xBlockLimit_min && threadIdx.y < yBlockLimit_min) { int i = index(clamp(x - FILTER_HALFSIZE,width), clamp(y - FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x][threadIdx.y] = pixel; } if (threadIdx.x < xBlockLimit_min) { int i = index(clamp(x - FILTER_HALFSIZE,width), clamp(y,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x][threadIdx.y + FILTER_HALFSIZE] = pixel; } if (threadIdx.x > xBlockLimit_max) { int i = index(clamp(x + FILTER_HALFSIZE,width), clamp(y,height), 
width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + 2*FILTER_HALFSIZE][threadIdx.y + FILTER_HALFSIZE] = pixel; } if (threadIdx.y < yBlockLimit_min) { int i = index(clamp(x,width), clamp(y - FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + FILTER_HALFSIZE][threadIdx.y] = pixel; } if (threadIdx.y > yBlockLimit_max) { int i = index(clamp(x,width), clamp(y + FILTER_HALFSIZE,height), width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + FILTER_HALFSIZE][threadIdx.y + 2*FILTER_HALFSIZE] = pixel; } int i = index(x, y, width); unsigned int pixel = input[i]; sharedPixels[threadIdx.x + FILTER_HALFSIZE][threadIdx.y + FILTER_HALFSIZE] = pixel; __syncthreads(); if((x<width) && (y<height)) { const int color_tid = y * width + x; float windowMedian[MAX_WINDOW*MAX_WINDOW]; int windowElements = 0; for (int x_iter = 0; x_iter < FILTER_SIZE; x_iter ++) { for (int y_iter = 0; y_iter < FILTER_SIZE; y_iter++) { if (0<=x_iter && x_iter < width && 0 <= y_iter && y_iter < height) { windowMedian[windowElements++] = sharedPixels[threadIdx.x + x_iter][threadIdx.y + y_iter]; } } } sort_bubble(windowMedian,windowElements); output[color_tid] = windowMedian[windowElements/2]; } } void median_filter_wrapper(const cv::Mat& input, cv::Mat& output) { unsigned char *d_input, *d_output; cudaError_t cudaStatus; cudaStatus = cudaMalloc<unsigned char>(&d_input,input.rows*input.cols); checkCudaErrors(cudaStatus); cudaStatus = cudaMalloc<unsigned char>(&d_output,output.rows*output.cols); checkCudaErrors(cudaStatus); cudaStatus = cudaMemcpy(d_input,input.ptr(),input.rows*input.cols,cudaMemcpyHostToDevice); checkCudaErrors(cudaStatus); const dim3 block(BLOCKDIM,BLOCKDIM); const dim3 grid(input.cols/BLOCKDIM, input.rows/BLOCKDIM); median_filter_2d<<<grid,block>>>(d_input,d_output,input.cols,input.rows); cudaStatus = cudaDeviceSynchronize(); checkCudaErrors(cudaStatus); cudaStatus = cudaMemcpy(output.ptr(),d_output,output.rows*output.cols,cudaMemcpyDeviceToHost); checkCudaErrors(cudaStatus); cudaStatus = cudaFree(d_input); checkCudaErrors(cudaStatus); cudaStatus = cudaFree(d_output); checkCudaErrors(cudaStatus); } int main(int argc, char* argv[]) { if (argc < 2) { cout << " Usage: program image.format [output.format]" << endl; return -1; } string imagePath = argv[1]; Mat input = imread(imagePath,0); if(input.empty()) { cout<<"Could not load image. Check location and try again."<<endl; cin.get(); return -1; } Mat output_gpu(input.rows,input.cols,CV_8UC1); median_filter_wrapper(input,output_gpu); string outputPath = imagePath; if (argc > 2) { outputPath = argv[2]; } imwrite(outputPath,output_gpu); return 0; }
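One caveat in median_filter_wrapper (both the HIP and CUDA versions above): the grid is sized as input.cols/BLOCKDIM by input.rows/BLOCKDIM with integer division, so the rightmost and bottom pixels are skipped whenever the image dimensions are not multiples of 32. A hedged drop-in alternative for that grid computation, using ceiling division, is sketched below; it reuses the wrapper's own variable names and is a suggestion, not part of the original code.

// Sketch only: ceiling-division grid so edge tiles are launched as well (the kernel already bounds-checks x and y).
const dim3 block(BLOCKDIM, BLOCKDIM);
const dim3 grid((input.cols + BLOCKDIM - 1) / BLOCKDIM,
                (input.rows + BLOCKDIM - 1) / BLOCKDIM);
median_filter_2d<<<grid, block>>>(d_input, d_output, input.cols, input.rows);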
a41ccc13ee020e09787df64fc933829f1a3cca9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2013 Gerhard Reitmayr, TU Graz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "fusion.h" #include "helpers.h" #include "thirdparty/perfstats.h" //#include <iostream> //using namespace std; PerfStats Stats; __global__ void setSphere( Volume volume, const float3 center, const float radius, const float val ){ uint3 pos = make_uint3(thr2pos2()); for(pos.z = 0; pos.z < volume.size.z; ++pos.z) { const float d = length(volume.pos(pos) - center); if(d < radius) volume.set(pos, make_float2(val, 0.0f)); } } __global__ void setBox( Volume volume, const float3 min_corner, const float3 max_corner, const float val ){ uint3 pos = make_uint3(thr2pos2()); for(pos.z = 0; pos.z < volume.size.z; ++pos.z) { const float3 p = volume.pos(pos); if(min_corner.x < p.x && min_corner.y < p.y && min_corner.z < p.z && p.x < max_corner.x && p.y < max_corner.y && p.z < max_corner.z ) volume.set(pos, make_float2(val, 0.0f)); } } void initVolumeWrap( Volume volume, const float val ){ dim3 block(32,16); hipLaunchKernelGGL(( initVolume), dim3(divup(dim3(volume.size.x, volume.size.y), block)), dim3(block), 0, 0, volume, make_float2(val, 0.0f)); } void setBoxWrap(Volume volume, const float3 min_corner, const float3 max_corner, const float val ){ dim3 block(32,16); hipLaunchKernelGGL(( setBox), dim3(divup(dim3(volume.size.x, volume.size.y), block)), dim3(block), 0, 0, volume, min_corner, max_corner, val); } void setSphereWrap(Volume volume, const float3 center, const float radius, const float val ){ dim3 block(32,16); hipLaunchKernelGGL(( setSphere), dim3(divup(dim3(volume.size.x, volume.size.y), block)), dim3(block), 0, 0, volume, center, radius, val); } __global__ void renderNormals( Image<uchar3> out, const Image<float3> in ){ float3 n = in.el(); if(n.x == -2) out.el() = make_uchar3(0,0,0); else { n = normalize(n); out.el() = make_uchar3(n.x*128 + 128, n.y*128+128, n.z*128+128); } } void renderNormalMap( Image<uchar3> out, const Image<float3> & normal ){ dim3 block(20,20); hipLaunchKernelGGL(( renderNormals), dim3(divup(normal.size, block)), dim3(block), 0, 0, out, normal ); } __global__ void renderLightKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const float3 light, const float3 ambient ){ if(normal.el().x == -2.0f) out.el() = make_uchar4(0,0,0,255); else { const float3 diff = normalize(light - vertex.el()); const float dir = fmaxf(dot(normal.el(), diff), 0.f); const float3 col = clamp(make_float3(dir) + ambient, 
0.f, 1.f) * 255; out.el() = make_uchar4(col.x, col.y, col.z, 255); } } void renderLight( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const float3 light, const float3 ambient ){ dim3 block(32,16); hipLaunchKernelGGL(( renderLightKernel), dim3(divup(normal.size, block)), dim3(block), 0, 0, out, vertex, normal, light, ambient ); } __global__ void renderTextureKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const Image<uchar3> texture, const Matrix4 texproj, const float3 light){ if(normal.el().x == -2.0f) out.el() = make_uchar4(0,0,0,255); else { const float3 proj = texproj * vertex.el(); const float2 projPixel = make_float2( proj.x / proj.z + 0.5f, proj.y / proj.z + 0.5f); const float3 diff = normalize(light - vertex.el()); const float dir = fmaxf(dot(normal.el(), diff), 0.f); // * 255; if(projPixel.x < 0 || projPixel.x > texture.size.x-1 || projPixel.y < 0 || projPixel.y > texture.size.y-1 ){ out.el() = make_uchar4(dir*255,dir*255,dir*255,255); } else { const uchar3 texcol = texture[make_uint2(projPixel.x, projPixel.y)]; out.el() = make_uchar4(texcol.x*dir, texcol.y*dir, texcol.z*dir, 255); } } } void renderTexture( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const Image<uchar3> & texture, const Matrix4 & texproj, const float3 light){ dim3 block(32,16); hipLaunchKernelGGL(( renderTextureKernel), dim3(divup(normal.size, block)), dim3(block), 0, 0, out, vertex, normal, texture, texproj, light); } __global__ void renderDepth( Image<uchar3> out, const Image<float> depth, const float nearPlane, const float farPlane){ const float d = (clamp(depth.el(), nearPlane, farPlane) - nearPlane) / (farPlane - nearPlane); out.el() = make_uchar3(d * 255, d * 255, d * 255); } void renderDepthMap( Image<uchar3> out, const Image<float> & depth, const float nearPlane, const float farPlane ){ dim3 block(32,16); hipLaunchKernelGGL(( renderDepth), dim3(divup(depth.size, block)), dim3(block), 0, 0, out, depth, nearPlane, farPlane ); } __global__ void renderTrack( Image<uchar4> out, const Image<TrackData> data ){ const uint2 pos = thr2pos2(); switch(data[pos].result){ case 1: out[pos] = make_uchar4(128, 128, 128,0); // ok break; case -1: out[pos] = make_uchar4(0, 0, 0,0); // no input break; case -2: out[pos] = make_uchar4(255,0,0,0); // not in image break; case -3: out[pos] = make_uchar4(0,255,0,0); // no correspondence break; case -4: out[pos] = make_uchar4(0,0,255,0); // to far away break; case -5: out[pos] = make_uchar4(255,255,0,0); // wrong normal break; } } void renderTrackResult( Image<uchar4> out, const Image<TrackData> & data ){ dim3 block(32,16); hipLaunchKernelGGL(( renderTrack), dim3(divup(out.size, block)), dim3(block), 0, 0, out, data ); } __global__ void raycastLight( Image<uchar4> render, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep, const float3 light, const float3 ambient){ const uint2 pos = thr2pos2(); float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep); if(hit.w > 0){ const float3 test = make_float3(hit); const float3 surfNorm = volume.grad(test); if(length(surfNorm) > 0){ const float3 diff = normalize(light - test); const float dir = fmaxf(dot(normalize(surfNorm), diff), 0.f); const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255; render.el() = make_uchar4(col.x, col.y, col.z,0); } else { render.el() = make_uchar4(0,0,0,0); } } else { render.el() = make_uchar4(0,0,0,0); } } 
void renderVolumeLight( Image<uchar4> out, const Volume & volume, const Matrix4 view, const float nearPlane, const float farPlane, const float largestep, const float3 light, const float3 ambient ){ dim3 block(16,16); hipLaunchKernelGGL(( raycastLight), dim3(divup(out.size, block)), dim3(block), 0, 0, out, volume, view, nearPlane, farPlane, volume.dim.x/volume.size.x, largestep, light, ambient ); } __global__ void raycastInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){ const uint2 pos = thr2pos2(); float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep); if(hit.w > 0){ pos3D[pos] = make_float3(hit); depth[pos] = hit.w; float3 surfNorm = volume.grad(make_float3(hit)); if(length(surfNorm) == 0){ normal[pos].x = -2; } else { normal[pos] = normalize(surfNorm); } } else { pos3D[pos] = make_float3(0); normal[pos] = make_float3(0); depth[pos] = 0; } } void renderInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){ dim3 block(16,16); hipLaunchKernelGGL(( raycastInput), dim3(divup(pos3D.size, block)), dim3(block), 0, 0, pos3D, normal, depth, volume, view, nearPlane, farPlane, step, largestep); }
a41ccc13ee020e09787df64fc933829f1a3cca9e.cu
/* Copyright (c) 2011-2013 Gerhard Reitmayr, TU Graz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "fusion.h" #include "helpers.h" #include "thirdparty/perfstats.h" //#include <iostream> //using namespace std; PerfStats Stats; __global__ void setSphere( Volume volume, const float3 center, const float radius, const float val ){ uint3 pos = make_uint3(thr2pos2()); for(pos.z = 0; pos.z < volume.size.z; ++pos.z) { const float d = length(volume.pos(pos) - center); if(d < radius) volume.set(pos, make_float2(val, 0.0f)); } } __global__ void setBox( Volume volume, const float3 min_corner, const float3 max_corner, const float val ){ uint3 pos = make_uint3(thr2pos2()); for(pos.z = 0; pos.z < volume.size.z; ++pos.z) { const float3 p = volume.pos(pos); if(min_corner.x < p.x && min_corner.y < p.y && min_corner.z < p.z && p.x < max_corner.x && p.y < max_corner.y && p.z < max_corner.z ) volume.set(pos, make_float2(val, 0.0f)); } } void initVolumeWrap( Volume volume, const float val ){ dim3 block(32,16); initVolume<<<divup(dim3(volume.size.x, volume.size.y), block), block>>>(volume, make_float2(val, 0.0f)); } void setBoxWrap(Volume volume, const float3 min_corner, const float3 max_corner, const float val ){ dim3 block(32,16); setBox<<<divup(dim3(volume.size.x, volume.size.y), block), block>>>(volume, min_corner, max_corner, val); } void setSphereWrap(Volume volume, const float3 center, const float radius, const float val ){ dim3 block(32,16); setSphere<<<divup(dim3(volume.size.x, volume.size.y), block), block>>>(volume, center, radius, val); } __global__ void renderNormals( Image<uchar3> out, const Image<float3> in ){ float3 n = in.el(); if(n.x == -2) out.el() = make_uchar3(0,0,0); else { n = normalize(n); out.el() = make_uchar3(n.x*128 + 128, n.y*128+128, n.z*128+128); } } void renderNormalMap( Image<uchar3> out, const Image<float3> & normal ){ dim3 block(20,20); renderNormals<<<divup(normal.size, block), block>>>( out, normal ); } __global__ void renderLightKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const float3 light, const float3 ambient ){ if(normal.el().x == -2.0f) out.el() = make_uchar4(0,0,0,255); else { const float3 diff = normalize(light - vertex.el()); const float dir = fmaxf(dot(normal.el(), diff), 0.f); const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255; out.el() = make_uchar4(col.x, col.y, col.z, 255); } } void renderLight( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const float3 light, const float3 ambient ){ dim3 block(32,16); 
renderLightKernel<<<divup(normal.size, block), block>>>( out, vertex, normal, light, ambient ); } __global__ void renderTextureKernel( Image<uchar4> out, const Image<float3> vertex, const Image<float3> normal, const Image<uchar3> texture, const Matrix4 texproj, const float3 light){ if(normal.el().x == -2.0f) out.el() = make_uchar4(0,0,0,255); else { const float3 proj = texproj * vertex.el(); const float2 projPixel = make_float2( proj.x / proj.z + 0.5f, proj.y / proj.z + 0.5f); const float3 diff = normalize(light - vertex.el()); const float dir = fmaxf(dot(normal.el(), diff), 0.f); // * 255; if(projPixel.x < 0 || projPixel.x > texture.size.x-1 || projPixel.y < 0 || projPixel.y > texture.size.y-1 ){ out.el() = make_uchar4(dir*255,dir*255,dir*255,255); } else { const uchar3 texcol = texture[make_uint2(projPixel.x, projPixel.y)]; out.el() = make_uchar4(texcol.x*dir, texcol.y*dir, texcol.z*dir, 255); } } } void renderTexture( Image<uchar4> out, const Image<float3> & vertex, const Image<float3> & normal, const Image<uchar3> & texture, const Matrix4 & texproj, const float3 light){ dim3 block(32,16); renderTextureKernel<<<divup(normal.size, block), block>>>( out, vertex, normal, texture, texproj, light); } __global__ void renderDepth( Image<uchar3> out, const Image<float> depth, const float nearPlane, const float farPlane){ const float d = (clamp(depth.el(), nearPlane, farPlane) - nearPlane) / (farPlane - nearPlane); out.el() = make_uchar3(d * 255, d * 255, d * 255); } void renderDepthMap( Image<uchar3> out, const Image<float> & depth, const float nearPlane, const float farPlane ){ dim3 block(32,16); renderDepth<<<divup(depth.size, block), block>>>( out, depth, nearPlane, farPlane ); } __global__ void renderTrack( Image<uchar4> out, const Image<TrackData> data ){ const uint2 pos = thr2pos2(); switch(data[pos].result){ case 1: out[pos] = make_uchar4(128, 128, 128,0); // ok break; case -1: out[pos] = make_uchar4(0, 0, 0,0); // no input break; case -2: out[pos] = make_uchar4(255,0,0,0); // not in image break; case -3: out[pos] = make_uchar4(0,255,0,0); // no correspondence break; case -4: out[pos] = make_uchar4(0,0,255,0); // to far away break; case -5: out[pos] = make_uchar4(255,255,0,0); // wrong normal break; } } void renderTrackResult( Image<uchar4> out, const Image<TrackData> & data ){ dim3 block(32,16); renderTrack<<<divup(out.size, block), block>>>( out, data ); } __global__ void raycastLight( Image<uchar4> render, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep, const float3 light, const float3 ambient){ const uint2 pos = thr2pos2(); float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep); if(hit.w > 0){ const float3 test = make_float3(hit); const float3 surfNorm = volume.grad(test); if(length(surfNorm) > 0){ const float3 diff = normalize(light - test); const float dir = fmaxf(dot(normalize(surfNorm), diff), 0.f); const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255; render.el() = make_uchar4(col.x, col.y, col.z,0); } else { render.el() = make_uchar4(0,0,0,0); } } else { render.el() = make_uchar4(0,0,0,0); } } void renderVolumeLight( Image<uchar4> out, const Volume & volume, const Matrix4 view, const float nearPlane, const float farPlane, const float largestep, const float3 light, const float3 ambient ){ dim3 block(16,16); raycastLight<<<divup(out.size, block), block>>>( out, volume, view, nearPlane, farPlane, volume.dim.x/volume.size.x, largestep, light, ambient ); } 
__global__ void raycastInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){ const uint2 pos = thr2pos2(); float4 hit = raycast( volume, pos, view, nearPlane, farPlane, step, largestep); if(hit.w > 0){ pos3D[pos] = make_float3(hit); depth[pos] = hit.w; float3 surfNorm = volume.grad(make_float3(hit)); if(length(surfNorm) == 0){ normal[pos].x = -2; } else { normal[pos] = normalize(surfNorm); } } else { pos3D[pos] = make_float3(0); normal[pos] = make_float3(0); depth[pos] = 0; } } void renderInput( Image<float3> pos3D, Image<float3> normal, Image<float> depth, const Volume volume, const Matrix4 view, const float nearPlane, const float farPlane, const float step, const float largestep){ dim3 block(16,16); raycastInput<<<divup(pos3D.size, block), block>>>(pos3D, normal, depth, volume, view, nearPlane, farPlane, step, largestep); }
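renderLightKernel and raycastLight above share the same per-pixel shading step: a Lambertian diffuse term (the clamped dot product between the surface normal and the direction to the light) plus an ambient offset, clamped to [0, 1] and scaled to 8-bit. Isolated as a standalone device helper, and relying on the same float3 math helpers the file already uses, it would look roughly like this (a sketch, not a function from the original sources):

// Sketch only: n = surface normal, p = surface point, light/ambient as in the kernels above.
__device__ uchar4 shade(float3 n, float3 p, float3 light, float3 ambient) {
    const float3 toLight = normalize(light - p);
    const float dir = fmaxf(dot(normalize(n), toLight), 0.f);             // Lambertian diffuse term
    const float3 col = clamp(make_float3(dir) + ambient, 0.f, 1.f) * 255; // ambient offset, clamp, scale
    return make_uchar4(col.x, col.y, col.z, 255);
}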
a8472b59cd768d5d602b1e33cdb410054f563359.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/ztranspose.cu normal z -> s, Tue Feb 9 16:05:33 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_s #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void stranspose_device( int m, int n, const float *A, int lda, float *AT, int ldat) { __shared__ float sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void stranspose_kernel( int m, int n, const float *A, int lda, float *AT, int ldat) { stranspose_device(m, n, A, lda, AT, ldat); } __global__ void stranspose_kernel_batched( int m, int n, float **dA_array, int lda, float **dAT_array, int ldat) { int batchid = blockIdx.z; stranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- stranspose copies and transposes a matrix dA to matrix dAT. Same as stranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA REAL array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT REAL array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_q( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( stranspose_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dAT, lddat ); } /** Purpose ------- stranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as stranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array REAL* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array REAL* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_batched( magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( stranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA_array, ldda, dAT_array, lddat ); }
a8472b59cd768d5d602b1e33cdb410054f563359.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/ztranspose.cu normal z -> s, Tue Feb 9 16:05:33 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_s #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void stranspose_device( int m, int n, const float *A, int lda, float *AT, int ldat) { __shared__ float sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void stranspose_kernel( int m, int n, const float *A, int lda, float *AT, int ldat) { stranspose_device(m, n, A, lda, AT, ldat); } __global__ void stranspose_kernel_batched( int m, int n, float **dA_array, int lda, float **dAT_array, int ldat) { int batchid = blockIdx.z; stranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- stranspose copies and transposes a matrix dA to matrix dAT. Same as stranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA REAL array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT REAL array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_q( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); stranspose_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dAT, lddat ); } /** Purpose ------- stranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as stranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array REAL* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array REAL* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_batched( magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); stranspose_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA_array, ldda, dAT_array, lddat ); }
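The header comment above describes the tiling scheme: the m-by-n matrix is covered by ceil(m/NB) x ceil(n/NB) tiles of NB x NB, each handled by an NX x NY thread block, with sA padded to NX+1 columns so that transposed accesses avoid shared-memory bank conflicts. As a concrete worked example of the launch geometry (the matrix size is hypothetical):

// Sketch only: a 1000 x 600 single-precision matrix with NB = 32, NX = 32, NY = 8.
// grid  = (magma_ceildiv(1000, 32), magma_ceildiv(600, 32)) = (32, 19)  -> 608 tiles of 32 x 32
// block = (32, 8): each tile is loaded and stored in NB/NY = 4 strips of 32 x 8 threads
dim3 threads(32, 8);
dim3 grid(magma_ceildiv(1000, 32), magma_ceildiv(600, 32));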
daf03458c2f482dbdbd5b595c0ca8a3cc068fb9c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <iostream> #include <fstream> # include <sys/time.h> #define ASIZE 256 struct timeval tim; double dTime1,dTime2,dTime3; int c=0; __global__ void processPattern(char* x ,int m, int shifts[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= m ) return; char c = x[idx]; for( int i = m - 1; i >= idx; --i ) { if ( x[i] == c ) { shifts[c] = m - i; return; } } } __global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx > (n - m) ) return; if ( indx[idx] != idx ) return; unsigned int yes = 1; for( int i = 0; i < m; ++i ) { if ( x[i] != y[idx + i] ) { yes = 0; break; } } results[idx] = yes; } char* readfile(const char* filename) { FILE* f; char* data; f= fopen(filename, "r"); if ( f != NULL ) { fseek(f,0,SEEK_END); int size=ftell(f); fseek(f,0,SEEK_SET); data = (char*)malloc((size+1) * sizeof(char)); fread(data, size,1,f); } fclose(f); return data; } void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) { int j = 0; int limit = n - m; while (j <= limit ) { j += shifts[ y[j + m] ]; indx[j] = j; } } void display_results(int n, int res[]) { for( int i =0; i < n; ++i ) if ( res[i] == 1 ) c++; // printf("\n\nCount:%d\n\n",c); // printf("%d. Found match at %d\n",j++, i); } int main(int argc, char* argv[]) { int cuda_device = 0; size_t n = 0; size_t m = 0; if ( argc < 4 ) { // printf("Usage: ./a.out <device number> <pattern> <data file>\n"); return -1; } if( argc > 1 ) cuda_device = atoi( argv[1] ); char* mainString = readfile(argv[3]); char* subString = (char*) malloc( (strlen(argv[2])+ 1) * sizeof(char) ); strcpy(subString, argv[2]); n = strlen(mainString)-1; m = strlen(subString); int* results=(int*)malloc(n * sizeof(int)); int* l_shifts = (int*)malloc( ASIZE * sizeof(int) ); for( int i = 0; i < ASIZE; ++i ) l_shifts[i] = m + 1; int* l_indx = (int*) malloc( n * sizeof(int) ); for( int i = 0; i < n; ++i ) { l_indx[i] = -1; results[i]=0; } // hipError_t error; hipEvent_t start_event, stop_event; float time1, time2; checkCudaErrors( hipEventCreate(&start_event) ); checkCudaErrors( hipEventCreate(&stop_event) ); int num_devices=0; checkCudaErrors( hipGetDeviceCount(&num_devices) ); /* if(0==num_devices) { // printf("Your system does not have a CUDA capable device\n"); return 1; } */ // if( cuda_device >= num_devices ) // { // if(num_devices==0) // printf("You have only 1 device and it's id is 0\n"); // else // printf("choose device ID between 0 and %d\n", num_devices-1); // return 1; // } //hipSetDevice( cuda_device ); hipDeviceProp_t deviceProp; checkCudaErrors( hipGetDeviceProperties(&deviceProp, cuda_device) ); // if( (1 == deviceProp.major) && (deviceProp.minor < 1)) // printf("%s does not have compute capability 1.1 or later\n", deviceProp.name); // printf("Device name : %s\n", deviceProp.name ); // printf("CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // printf("array_size = %zd\n", n); char* d_substr = 0; int* d_shifts = 0; int* d_indx = 0; char* d_text = 0; int* d_results = 0; checkCudaErrors( hipMalloc((void**)&d_shifts, sizeof(int)*ASIZE)); checkCudaErrors( hipMalloc((void**)&d_indx, n * sizeof(int)) ); checkCudaErrors( hipMalloc((void**)&d_results, n * sizeof(int)) ); checkCudaErrors( 
hipMalloc((void**)&d_substr, (m + 1)*sizeof(char)) ); checkCudaErrors( hipMalloc((void**)&d_text, (strlen(mainString)+1)*sizeof(char)) ); checkCudaErrors( hipMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, hipMemcpyHostToDevice ) ); checkCudaErrors( hipMemcpy(d_results, results, sizeof(int) * n, hipMemcpyHostToDevice ) ); checkCudaErrors( hipMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)+1), hipMemcpyHostToDevice ) ); checkCudaErrors( hipMemcpy(d_substr, subString, sizeof(char)*(strlen(subString)), hipMemcpyHostToDevice) ); // error = hipGetLastError(); // printf("%s\n", hipGetErrorString(error)); dim3 threadsPerBlocks(ASIZE, 1); int t = m / threadsPerBlocks.x; int t1 = m % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks(t,1); // printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x); hipEventRecord(start_event, 0); hipLaunchKernelGGL(( processPattern), dim3(numBlocks),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_shifts); hipDeviceSynchronize(); hipEventRecord(stop_event, 0); hipEventSynchronize( stop_event ); hipEventElapsedTime( &time1, start_event, stop_event ); checkCudaErrors( hipMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, hipMemcpyDeviceToHost ) ); gettimeofday(&tim,NULL); dTime1=tim.tv_sec + (tim.tv_usec/1000000.0); precomputeShiftIndx(mainString , n, m, l_shifts, l_indx); gettimeofday(&tim,NULL); dTime2=tim.tv_sec + (tim.tv_usec/1000000.0); checkCudaErrors( hipMemcpy(d_indx, l_indx, n * sizeof(int), hipMemcpyHostToDevice) ); dTime3=dTime2-dTime1; /* // For debugging for( int i = 0; i < ASIZE; ++i ) printf("%d\t",l_shifts[i]); printf("\n\n"); for( int i = 0; i < n; ++i ) printf("%d\t",l_indx[i]); printf("\n\n"); printf("%zd\t%zd",n,m); printf("\n\n"); */ t = n / threadsPerBlocks.x; t1 = n % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks2(t, 1); // printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x); hipEventRecord(start_event, 0); hipLaunchKernelGGL(( search), dim3(numBlocks2),dim3(threadsPerBlocks), 0, 0, d_substr, m, d_text, n, d_shifts, d_indx, d_results); hipDeviceSynchronize(); hipEventRecord(stop_event, 0); hipEventSynchronize( stop_event ); hipEventElapsedTime( &time2, start_event, stop_event ); hipEventDestroy( start_event ); hipEventDestroy( stop_event ); // printf("%f+%f=%lf\n",time1, time2, (double)(time1+time2+dTime3) ); printf("%lf\t", (double)(time1+time2+dTime3) ); checkCudaErrors( hipMemcpy(results, d_results, n * sizeof(int), hipMemcpyDeviceToHost) ); display_results(n, results); hipFree(d_substr); hipFree(d_shifts); hipFree(d_indx); hipFree(d_text); hipFree(d_results); free(mainString); free(subString); free(l_indx); free(l_shifts); free(results); hipDeviceReset(); }
daf03458c2f482dbdbd5b595c0ca8a3cc068fb9c.cu
#include <cuda.h> #include <helper_cuda.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <iostream> #include <fstream> # include <sys/time.h> #define ASIZE 256 struct timeval tim; double dTime1,dTime2,dTime3; int c=0; __global__ void processPattern(char* x ,int m, int shifts[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx >= m ) return; char c = x[idx]; for( int i = m - 1; i >= idx; --i ) { if ( x[i] == c ) { shifts[c] = m - i; return; } } } __global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[]) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if ( idx > (n - m) ) return; if ( indx[idx] != idx ) return; unsigned int yes = 1; for( int i = 0; i < m; ++i ) { if ( x[i] != y[idx + i] ) { yes = 0; break; } } results[idx] = yes; } char* readfile(const char* filename) { FILE* f; char* data; f= fopen(filename, "r"); if ( f != NULL ) { fseek(f,0,SEEK_END); int size=ftell(f); fseek(f,0,SEEK_SET); data = (char*)malloc((size+1) * sizeof(char)); fread(data, size,1,f); } fclose(f); return data; } void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[]) { int j = 0; int limit = n - m; while (j <= limit ) { j += shifts[ y[j + m] ]; indx[j] = j; } } void display_results(int n, int res[]) { for( int i =0; i < n; ++i ) if ( res[i] == 1 ) c++; // printf("\n\nCount:%d\n\n",c); // printf("%d. Found match at %d\n",j++, i); } int main(int argc, char* argv[]) { int cuda_device = 0; size_t n = 0; size_t m = 0; if ( argc < 4 ) { // printf("Usage: ./a.out <device number> <pattern> <data file>\n"); return -1; } if( argc > 1 ) cuda_device = atoi( argv[1] ); char* mainString = readfile(argv[3]); char* subString = (char*) malloc( (strlen(argv[2])+ 1) * sizeof(char) ); strcpy(subString, argv[2]); n = strlen(mainString)-1; m = strlen(subString); int* results=(int*)malloc(n * sizeof(int)); int* l_shifts = (int*)malloc( ASIZE * sizeof(int) ); for( int i = 0; i < ASIZE; ++i ) l_shifts[i] = m + 1; int* l_indx = (int*) malloc( n * sizeof(int) ); for( int i = 0; i < n; ++i ) { l_indx[i] = -1; results[i]=0; } // cudaError_t error; cudaEvent_t start_event, stop_event; float time1, time2; checkCudaErrors( cudaEventCreate(&start_event) ); checkCudaErrors( cudaEventCreate(&stop_event) ); int num_devices=0; checkCudaErrors( cudaGetDeviceCount(&num_devices) ); /* if(0==num_devices) { // printf("Your system does not have a CUDA capable device\n"); return 1; } */ // if( cuda_device >= num_devices ) // { // if(num_devices==0) // printf("You have only 1 device and it's id is 0\n"); // else // printf("choose device ID between 0 and %d\n", num_devices-1); // return 1; // } //cudaSetDevice( cuda_device ); cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, cuda_device) ); // if( (1 == deviceProp.major) && (deviceProp.minor < 1)) // printf("%s does not have compute capability 1.1 or later\n", deviceProp.name); // printf("Device name : %s\n", deviceProp.name ); // printf("CUDA Capable SM %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); // printf("array_size = %zd\n", n); char* d_substr = 0; int* d_shifts = 0; int* d_indx = 0; char* d_text = 0; int* d_results = 0; checkCudaErrors( cudaMalloc((void**)&d_shifts, sizeof(int)*ASIZE)); checkCudaErrors( cudaMalloc((void**)&d_indx, n * sizeof(int)) ); checkCudaErrors( cudaMalloc((void**)&d_results, n * sizeof(int)) ); checkCudaErrors( cudaMalloc((void**)&d_substr, (m + 1)*sizeof(char)) ); checkCudaErrors( 
cudaMalloc((void**)&d_text, (strlen(mainString)+1)*sizeof(char)) ); checkCudaErrors( cudaMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, cudaMemcpyHostToDevice ) ); checkCudaErrors( cudaMemcpy(d_results, results, sizeof(int) * n, cudaMemcpyHostToDevice ) ); checkCudaErrors( cudaMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)+1), cudaMemcpyHostToDevice ) ); checkCudaErrors( cudaMemcpy(d_substr, subString, sizeof(char)*(strlen(subString)), cudaMemcpyHostToDevice) ); // error = cudaGetLastError(); // printf("%s\n", cudaGetErrorString(error)); dim3 threadsPerBlocks(ASIZE, 1); int t = m / threadsPerBlocks.x; int t1 = m % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks(t,1); // printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x); cudaEventRecord(start_event, 0); processPattern<<<numBlocks,threadsPerBlocks>>>(d_substr, m, d_shifts); cudaThreadSynchronize(); cudaEventRecord(stop_event, 0); cudaEventSynchronize( stop_event ); cudaEventElapsedTime( &time1, start_event, stop_event ); checkCudaErrors( cudaMemcpy(l_shifts, d_shifts, sizeof(int) * ASIZE, cudaMemcpyDeviceToHost ) ); gettimeofday(&tim,NULL); dTime1=tim.tv_sec + (tim.tv_usec/1000000.0); precomputeShiftIndx(mainString , n, m, l_shifts, l_indx); gettimeofday(&tim,NULL); dTime2=tim.tv_sec + (tim.tv_usec/1000000.0); checkCudaErrors( cudaMemcpy(d_indx, l_indx, n * sizeof(int), cudaMemcpyHostToDevice) ); dTime3=dTime2-dTime1; /* // For debugging for( int i = 0; i < ASIZE; ++i ) printf("%d\t",l_shifts[i]); printf("\n\n"); for( int i = 0; i < n; ++i ) printf("%d\t",l_indx[i]); printf("\n\n"); printf("%zd\t%zd",n,m); printf("\n\n"); */ t = n / threadsPerBlocks.x; t1 = n % threadsPerBlocks.x; if ( t1 != 0 ) t += 1; dim3 numBlocks2(t, 1); // printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x); cudaEventRecord(start_event, 0); search<<<numBlocks2,threadsPerBlocks>>>(d_substr, m, d_text, n, d_shifts, d_indx, d_results); cudaThreadSynchronize(); cudaEventRecord(stop_event, 0); cudaEventSynchronize( stop_event ); cudaEventElapsedTime( &time2, start_event, stop_event ); cudaEventDestroy( start_event ); cudaEventDestroy( stop_event ); // printf("%f+%f=%lf\n",time1, time2, (double)(time1+time2+dTime3) ); printf("%lf\t", (double)(time1+time2+dTime3) ); checkCudaErrors( cudaMemcpy(results, d_results, n * sizeof(int), cudaMemcpyDeviceToHost) ); display_results(n, results); cudaFree(d_substr); cudaFree(d_shifts); cudaFree(d_indx); cudaFree(d_text); cudaFree(d_results); free(mainString); free(subString); free(l_indx); free(l_shifts); free(results); cudaThreadExit(); }
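The pair of kernels above splits a Quick-Search-style string match into three stages: processPattern builds, in parallel over pattern positions, a bad-character shift table keyed on the rightmost occurrence of each byte (default shift m + 1); precomputeShiftIndx then walks the text on the host, jumping by shifts[y[j + m]] (the byte just past the window) and marking each visited alignment in indx; finally, search verifies each marked alignment with one thread. A sequential CPU sketch of the same shift-table idea, shown only to clarify what the kernels parallelise (it reuses ASIZE = 256 from above and assumes y is NUL-terminated so the sentinel read y[j + m] stays in bounds):

// Sketch only: not part of the original file.
#include <cstring>
static int shift_table_count(const char* x, int m, const char* y, int n) {
    int shift[ASIZE];
    for (int i = 0; i < ASIZE; ++i) shift[i] = m + 1;                // byte absent from pattern: jump past window
    for (int i = 0; i < m; ++i) shift[(unsigned char)x[i]] = m - i;  // rightmost occurrence wins
    int count = 0;
    for (int j = 0; j <= n - m; j += shift[(unsigned char)y[j + m]])
        if (memcmp(x, y + j, m) == 0) ++count;
    return count;
}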
f957d7f2a1bb1c9b4472f8c05b57f9a93f0fe651.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { thrust::host_vector<int> hst_in(idata, idata + n); thrust::device_vector<int> dev_in = hst_in; thrust::device_vector<int> dev_out(n); thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin()); thrust::host_vector<int> hst_out = dev_out; for (int i = 0; i < n; i++) { odata[i] = hst_out[i]; } } } }
f957d7f2a1bb1c9b4472f8c05b57f9a93f0fe651.cu
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { thrust::host_vector<int> hst_in(idata, idata + n); thrust::device_vector<int> dev_in = hst_in; thrust::device_vector<int> dev_out(n); thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin()); thrust::host_vector<int> hst_out = dev_out; for (int i = 0; i < n; i++) { odata[i] = hst_out[i]; } } } }
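StreamCompaction::Thrust::scan performs an exclusive prefix sum: output element i is the sum of all inputs before i, so the first output is always 0 and the last input value never contributes to any output. A minimal usage sketch (assuming the declaration from this project's thrust.h header is visible to the caller):

// Sketch only: expected output is 0 3 4 8 9.
#include <cstdio>
#include "thrust.h"
int main() {
    const int n = 5;
    int in[n]  = {3, 1, 4, 1, 5};
    int out[n] = {0};
    StreamCompaction::Thrust::scan(n, out, in);
    for (int i = 0; i < n; ++i) printf("%d ", out[i]);
    printf("\n");
    return 0;
}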
aaba552d3ae0bbee97417aec5d7a20d3c8939aaa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Normalize_forward_.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *norm = NULL; hipMalloc(&norm, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int size23 = XSIZE*YSIZE; int size123 = XSIZE*YSIZE; int size0123 = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Normalize_forward_), dim3(gridBlock),dim3(threadBlock), 0, 0, input,norm,output,size23,size123,size0123); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Normalize_forward_), dim3(gridBlock),dim3(threadBlock), 0, 0, input,norm,output,size23,size123,size0123); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Normalize_forward_), dim3(gridBlock),dim3(threadBlock), 0, 0, input,norm,output,size23,size123,size0123); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
aaba552d3ae0bbee97417aec5d7a20d3c8939aaa.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Normalize_forward_.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *norm = NULL; cudaMalloc(&norm, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int size23 = XSIZE*YSIZE; int size123 = XSIZE*YSIZE; int size0123 = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Normalize_forward_<<<gridBlock,threadBlock>>>(input,norm,output,size23,size123,size0123); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Normalize_forward_<<<gridBlock,threadBlock>>>(input,norm,output,size23,size123,size0123); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Normalize_forward_<<<gridBlock,threadBlock>>>(input,norm,output,size23,size123,size0123); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
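The hipify rewrites visible in this pair are mechanical: cudaSetDevice, cudaMalloc, cudaFree, and cudaDeviceSynchronize become their hip* counterparts, the triple-chevron launch becomes hipLaunchKernelGGL, and the std::chrono timing loop is left untouched. A minimal sketch of that launch mapping with a placeholder kernel (scaleKernel is illustrative, not Normalize_forward_):

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void scaleKernel(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

int main() {
    const int n = 1024;
    float *d_x = NULL;
    cudaMalloc(&d_x, n * sizeof(float));              // hipify: hipMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    dim3 threadBlock(256);
    dim3 gridBlock((n + threadBlock.x - 1) / threadBlock.x);

    scaleKernel<<<gridBlock, threadBlock>>>(d_x, 2.0f, n);
    // hipify rewrites the launch above as:
    //   hipLaunchKernelGGL(scaleKernel, gridBlock, threadBlock, 0, 0, d_x, 2.0f, n);
    cudaDeviceSynchronize();                          // hipify: hipDeviceSynchronize();

    cudaFree(d_x);                                    // hipify: hipFree(d_x);
    return 0;
}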
53ffb019f632b8eef9e8b60728d4f7b1ba907861.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // for the older gpus atomicAdd with double arguments does not exist #if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__) static __inline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } while (assumed != old); return __longlong_as_double(old); } // static __inline__ __device__ double atomicMin(double* address, double val) { // unsigned long long int* address_as_ull = (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do { // assumed = old; // old = atomicCAS(address_as_ull, assumed, // __double_as_longlong(fminf(val, __longlong_as_double(assumed)))); // // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); // } while (assumed != old); // return __longlong_as_double(old); // } #endif namespace{ __device__ float atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <typename scalar_t> __device__ __forceinline__ bool check_face_frontside(const scalar_t *face) { return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]); } template <typename scalar_t> __device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) { return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0; } template <typename scalar_t> __device__ __forceinline__ void barycentric_clip(scalar_t *w) { for (int k = 0; k < 3; k++) w[k] = max(min(w[k], 1.), 0.); const scalar_t w_sum = max(w[0] + w[1] + w[2], 1e-5); for (int k = 0; k < 3; k++) w[k] /= w_sum; } template <typename scalar_t> __global__ void forward_rasterize_cuda_kernel( // const scalar_t* __restrict__ vertices, //[bz, nv, 3] const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] float* depth_buffer, int* triangle_buffer, float* baryw_buffer, int batch_size, int h, int w, int ntri) { /* batch number, face, number, image size, face[v012][RGB] */ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } // const int is = image_size; const scalar_t* face = &face_vertices[i * 9]; scalar_t bw[3]; // scalar_t depth_min = 10000000; /* return if backside */ // if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) // return; /* p[num][xy]: x, y is (-1, 1). 
*/ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = face[3 * num + dim]; // no normalize } } /* compute face_inv */ scalar_t face_inv_star[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_determinant = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); face_inv_determinant = face_inv_determinant > 0 ? max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10); /* set to global memory */ scalar_t face_inv[9]; for (int k = 0; k < 9; k++) { face_inv[k] = face_inv_star[k] / face_inv_determinant; } int x_min = max((int)ceil(min(p[0][0], min(p[1][0], p[2][0]))), 0); int x_max = min((int)floor(max(p[0][0], max(p[1][0], p[2][0]))), w - 1); int y_min = max((int)ceil(min(p[0][1], min(p[1][1], p[2][1]))), 0); int y_max = min((int)floor(max(p[0][1], max(p[1][1], p[2][1]))), h - 1); int bn = i/ntri; for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { bw[0] = face_inv[3 * 0 + 0] * x + face_inv[3 * 0 + 1] * y + face_inv[3 * 0 + 2]; bw[1] = face_inv[3 * 1 + 0] * x + face_inv[3 * 1 + 1] * y + face_inv[3 * 1 + 2]; bw[2] = face_inv[3 * 2 + 0] * x + face_inv[3 * 2 + 1] * y + face_inv[3 * 2 + 2]; barycentric_clip(bw); if(check_pixel_inside(bw))// && check_face_frontside(face)) { // const // barycentric_clip(bw); scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp); // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } template <typename scalar_t> __global__ void forward_rasterize_colors_cuda_kernel( // const scalar_t* __restrict__ vertices, //[bz, nv, 3] const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] const scalar_t* __restrict__ face_colors, //[bz, nf, 3, 3] float* depth_buffer, int* triangle_buffer, float* images, int batch_size, int h, int w, int ntri) { /* batch number, face, number, image size, face[v012][RGB] */ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } // const int is = image_size; const scalar_t* face = &face_vertices[i * 9]; const scalar_t* color = &face_colors[i * 9]; int bn = i/ntri; scalar_t bw[3]; // scalar_t depth_min = 10000000; /* return if backside */ // if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) // return; /* p[num][xy]: x, y is (-1, 1). 
*/ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = face[3 * num + dim]; // no normalize } } scalar_t cl[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { cl[num][dim] = color[3 * num + dim]; //[3p,3rgb] } } /* compute face_inv */ scalar_t face_inv_star[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_determinant = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); face_inv_determinant = face_inv_determinant > 0 ? max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10); /* set to global memory */ scalar_t face_inv[9]; for (int k = 0; k < 9; k++) { face_inv[k] = face_inv_star[k] / face_inv_determinant; } int x_min = max((int)ceil(min(p[0][0], min(p[1][0], p[2][0]))), 0); int x_max = min((int)floor(max(p[0][0], max(p[1][0], p[2][0]))), w - 1); int y_min = max((int)ceil(min(p[0][1], min(p[1][1], p[2][1]))), 0); int y_max = min((int)floor(max(p[0][1], max(p[1][1], p[2][1]))), h - 1); for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { bw[0] = face_inv[3 * 0 + 0] * x + face_inv[3 * 0 + 1] * y + face_inv[3 * 0 + 2]; bw[1] = face_inv[3 * 1 + 0] * x + face_inv[3 * 1 + 1] * y + face_inv[3 * 1 + 2]; bw[2] = face_inv[3 * 2 + 0] * x + face_inv[3 * 2 + 1] * y + face_inv[3 * 2 + 2]; if(check_pixel_inside(bw))// && check_face_frontside(face)) { // const barycentric_clip(bw); scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp); // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ // baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; images[bn*h*w*3 + y*w*3 + x*3 + k] = bw[0]*cl[0][k] + bw[1]*cl[1][k] + bw[2]*cl[2][k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } } std::vector<at::Tensor> forward_rasterize_cuda( at::Tensor face_vertices, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor baryw_buffer, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda1", ([&] { hipLaunchKernelGGL(( forward_rasterize_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), baryw_buffer.data<float>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda2", ([&] { hipLaunchKernelGGL(( forward_rasterize_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), baryw_buffer.data<float>(), batch_size, h, w, ntri); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", hipGetErrorString(err)); return {depth_buffer, triangle_buffer, baryw_buffer}; } std::vector<at::Tensor> 
forward_rasterize_colors_cuda( at::Tensor face_vertices, at::Tensor face_colors, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor images, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); //initial AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { hipLaunchKernelGGL(( forward_rasterize_colors_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), images.data<float>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { hipLaunchKernelGGL(( forward_rasterize_colors_cuda_kernel<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), images.data<float>(), batch_size, h, w, ntri); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", hipGetErrorString(err)); return {depth_buffer, triangle_buffer, images}; }
53ffb019f632b8eef9e8b60728d4f7b1ba907861.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> // for the older gpus atomicAdd with double arguments does not exist #if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__) static __inline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } while (assumed != old); return __longlong_as_double(old); } // static __inline__ __device__ double atomicMin(double* address, double val) { // unsigned long long int* address_as_ull = (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do { // assumed = old; // old = atomicCAS(address_as_ull, assumed, // __double_as_longlong(fminf(val, __longlong_as_double(assumed)))); // // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); // } while (assumed != old); // return __longlong_as_double(old); // } #endif namespace{ __device__ float atomicMin(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <typename scalar_t> __device__ __forceinline__ bool check_face_frontside(const scalar_t *face) { return (face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0]); } template <typename scalar_t> __device__ __forceinline__ bool check_pixel_inside(const scalar_t *w) { return w[0] <= 1 && w[0] >= 0 && w[1] <= 1 && w[1] >= 0 && w[2] <= 1 && w[2] >= 0; } template <typename scalar_t> __device__ __forceinline__ void barycentric_clip(scalar_t *w) { for (int k = 0; k < 3; k++) w[k] = max(min(w[k], 1.), 0.); const scalar_t w_sum = max(w[0] + w[1] + w[2], 1e-5); for (int k = 0; k < 3; k++) w[k] /= w_sum; } template <typename scalar_t> __global__ void forward_rasterize_cuda_kernel( // const scalar_t* __restrict__ vertices, //[bz, nv, 3] const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] float* depth_buffer, int* triangle_buffer, float* baryw_buffer, int batch_size, int h, int w, int ntri) { /* batch number, face, number, image size, face[v012][RGB] */ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } // const int is = image_size; const scalar_t* face = &face_vertices[i * 9]; scalar_t bw[3]; // scalar_t depth_min = 10000000; /* return if backside */ // if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) // return; /* p[num][xy]: x, y is (-1, 1). */ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = face[3 * num + dim]; // no normalize } } /* compute face_inv */ scalar_t face_inv_star[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_determinant = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); face_inv_determinant = face_inv_determinant > 0 ? 
max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10); /* set to global memory */ scalar_t face_inv[9]; for (int k = 0; k < 9; k++) { face_inv[k] = face_inv_star[k] / face_inv_determinant; } int x_min = max((int)ceil(min(p[0][0], min(p[1][0], p[2][0]))), 0); int x_max = min((int)floor(max(p[0][0], max(p[1][0], p[2][0]))), w - 1); int y_min = max((int)ceil(min(p[0][1], min(p[1][1], p[2][1]))), 0); int y_max = min((int)floor(max(p[0][1], max(p[1][1], p[2][1]))), h - 1); int bn = i/ntri; for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { bw[0] = face_inv[3 * 0 + 0] * x + face_inv[3 * 0 + 1] * y + face_inv[3 * 0 + 2]; bw[1] = face_inv[3 * 1 + 0] * x + face_inv[3 * 1 + 1] * y + face_inv[3 * 1 + 2]; bw[2] = face_inv[3 * 2 + 0] * x + face_inv[3 * 2 + 1] * y + face_inv[3 * 2 + 2]; barycentric_clip(bw); if(check_pixel_inside(bw))// && check_face_frontside(face)) { // const // barycentric_clip(bw); scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp); // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } template <typename scalar_t> __global__ void forward_rasterize_colors_cuda_kernel( // const scalar_t* __restrict__ vertices, //[bz, nv, 3] const scalar_t* __restrict__ face_vertices, //[bz, nf, 3, 3] const scalar_t* __restrict__ face_colors, //[bz, nf, 3, 3] float* depth_buffer, int* triangle_buffer, float* images, int batch_size, int h, int w, int ntri) { /* batch number, face, number, image size, face[v012][RGB] */ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * ntri) { return; } // const int is = image_size; const scalar_t* face = &face_vertices[i * 9]; const scalar_t* color = &face_colors[i * 9]; int bn = i/ntri; scalar_t bw[3]; // scalar_t depth_min = 10000000; /* return if backside */ // if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) // return; /* p[num][xy]: x, y is (-1, 1). */ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = face[3 * num + dim]; // no normalize } } scalar_t cl[3][3]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 3; dim++) { cl[num][dim] = color[3 * num + dim]; //[3p,3rgb] } } /* compute face_inv */ scalar_t face_inv_star[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_determinant = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); face_inv_determinant = face_inv_determinant > 0 ? 
max(face_inv_determinant, 1e-10) : min(face_inv_determinant, -1e-10); /* set to global memory */ scalar_t face_inv[9]; for (int k = 0; k < 9; k++) { face_inv[k] = face_inv_star[k] / face_inv_determinant; } int x_min = max((int)ceil(min(p[0][0], min(p[1][0], p[2][0]))), 0); int x_max = min((int)floor(max(p[0][0], max(p[1][0], p[2][0]))), w - 1); int y_min = max((int)ceil(min(p[0][1], min(p[1][1], p[2][1]))), 0); int y_max = min((int)floor(max(p[0][1], max(p[1][1], p[2][1]))), h - 1); for(int y = y_min; y <= y_max; y++) //h { for(int x = x_min; x <= x_max; x++) //w { bw[0] = face_inv[3 * 0 + 0] * x + face_inv[3 * 0 + 1] * y + face_inv[3 * 0 + 2]; bw[1] = face_inv[3 * 1 + 0] * x + face_inv[3 * 1 + 1] * y + face_inv[3 * 1 + 2]; bw[2] = face_inv[3 * 2 + 0] * x + face_inv[3 * 2 + 1] * y + face_inv[3 * 2 + 2]; if(check_pixel_inside(bw))// && check_face_frontside(face)) { // const barycentric_clip(bw); scalar_t zp = 1. / (bw[0] / face[2] + bw[1] / face[5] + bw[2] / face[8]); atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); if(depth_buffer[bn*h*w + y*w + x] == zp) { // depth_min = zp; // atomic long long for two int // scalar_t tri_ind = i%ntri; // atomicAdd( (int*)&depth_buffer[bn*h*w + y*w + x], (int)zp); // atomicMin(&depth_buffer[bn*h*w + y*w + x], zp); triangle_buffer[bn*h*w + y*w + x] = (int)(i%ntri); for(int k=0; k<3; k++){ // baryw_buffer[bn*h*w*3 + y*w*3 + x*3 + k] = bw[k]; images[bn*h*w*3 + y*w*3 + x*3 + k] = bw[0]*cl[0][k] + bw[1]*cl[1][k] + bw[2]*cl[2][k]; } // buffers[bn*h*w*2 + y*w*2 + x*2 + 1] = p_depth; } } } } } } std::vector<at::Tensor> forward_rasterize_cuda( at::Tensor face_vertices, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor baryw_buffer, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda1", ([&] { forward_rasterize_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), baryw_buffer.data<float>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_cuda2", ([&] { forward_rasterize_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), baryw_buffer.data<float>(), batch_size, h, w, ntri); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", cudaGetErrorString(err)); return {depth_buffer, triangle_buffer, baryw_buffer}; } std::vector<at::Tensor> forward_rasterize_colors_cuda( at::Tensor face_vertices, at::Tensor face_colors, at::Tensor depth_buffer, at::Tensor triangle_buffer, at::Tensor images, int h, int w){ const auto batch_size = face_vertices.size(0); const auto ntri = face_vertices.size(1); // print(channel_size) const int threads = 512; const dim3 blocks_1 ((batch_size * ntri - 1) / threads +1); //initial AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { forward_rasterize_colors_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), images.data<float>(), batch_size, h, w, ntri); })); AT_DISPATCH_FLOATING_TYPES(face_vertices.type(), "forward_rasterize_colors_cuda", ([&] { 
forward_rasterize_colors_cuda_kernel<scalar_t><<<blocks_1, threads>>>( face_vertices.data<scalar_t>(), face_colors.data<scalar_t>(), depth_buffer.data<float>(), triangle_buffer.data<int>(), images.data<float>(), batch_size, h, w, ntri); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_rasterize_cuda_kernel: %s\n", cudaGetErrorString(err)); return {depth_buffer, triangle_buffer, images}; }
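Apart from the header and launch-syntax rewrites, the pair above relies on a hand-rolled float atomicMin built from atomicCAS, because the built-in atomicMin overloads in CUDA do not cover float. A minimal, self-contained sketch of that same technique; the reduction kernel and test values here are illustrative, not taken from the rasterizer:

#include <stdio.h>
#include <cuda_runtime.h>

// Emulate atomicMin for float by reinterpreting the bits as int and
// retrying with atomicCAS until no other thread has intervened.
__device__ float atomicMinFloat(float *address, float val) {
    int *address_as_i = (int *)address;
    int old = *address_as_i, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_i, assumed,
                        __float_as_int(fminf(val, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}

__global__ void minKernel(float *result, const float *vals, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicMinFloat(result, vals[i]);
}

int main() {
    const int n = 256;
    float h_vals[n];
    for (int i = 0; i < n; i++) h_vals[i] = 100.0f - i;   // minimum is 100 - 255 = -155
    float h_result = 1e30f;                               // large sentinel, like a cleared depth buffer

    float *d_vals = NULL, *d_result = NULL;
    cudaMalloc(&d_vals, n * sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    cudaMemcpy(d_vals, h_vals, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_result, &h_result, sizeof(float), cudaMemcpyHostToDevice);

    minKernel<<<(n + 127) / 128, 128>>>(d_result, d_vals, n);
    cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    printf("min = %f\n", h_result);                       // expected -155.000000

    cudaFree(d_vals);
    cudaFree(d_result);
    return 0;
}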
f3a018b95d3e47959010f96242898cf971cf1ea9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Library Definition #include <iostream> //cout #include <fstream> //Files #include <cstdlib> //atoi function //Constant Definition #define PI 3.141592654 #define blocksize 32 #define n 512 #define p 128 //Print matrix into standard output void print(double * M,int cols,int rows); /* DEVICE FUNCTIONS */ //Matrix transposition (Rows and Cols of M) __global__ void matrixTrans(double * M,double * MT, int rows, int cols); //Matrix multiplication(Cols and Rows of the result) __global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2); //INVERSION OF MATRICES ----GAUSS JORDAN METHOD -------- void Inverse(double * A, double * I,int nn); __global__ void nodiag_normalize(double *A, double *I, int nn, int i); __global__ void diag_normalize(double *A, double *I, int nn, int i); __global__ void gaussjordan(double *A, double *I, int nn, int i); __global__ void set_zero(double *A, double *I, int nn, int i); //Sum of Matrices __global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols); //Initialization of matrices, ones, zeros, identity void set_ones(double * M, int l); void set_zeros(double * M, int l); void set_iden(double * M, int l); //Print matrices into external files void print_file(char const * NameArch, const double * M,int cols,int rows); //Random numbers double normal_rand(void); /* MAIN FUNCTION */ int main(int argc, char * argv[]){ srand(atoi(argv[1])); //Seed recieved from terminal //int cols=p; //int raws=n; double *X, *Xt, *XXt, *Inv; double *H0,*H, *J, *Suma; double *Y,*Yt, *aux, *Id; int size0 = n * sizeof(double); int size2 = p * p * sizeof(double); int size3 = n * n * sizeof(double); int size4 = n * p * sizeof(double); hipMallocManaged(&X,size4); hipMallocManaged(&Xt,size4); hipMallocManaged(&H0,size4); hipMallocManaged(&H,size3); hipMallocManaged(&J,size3); hipMallocManaged(&Suma,size3); hipMallocManaged(&XXt,size2); hipMallocManaged(&Yt,size0); hipMallocManaged(&Inv,size2); hipMallocManaged(&Y,size0); hipMallocManaged(&aux,size0); hipMallocManaged(&Id,size3); double suma1=0,suma2=0; for(int row=0;row<n;row++){ for(int col=0;col<p;col++){ X[row*p+col]=normal_rand(); Y[col]=normal_rand(); } } print_file("x.dat",X,p,n); set_iden(Inv,p); dim3 threadsPerBlock(blocksize, blocksize); dim3 numBlocks((p + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize); hipLaunchKernelGGL(( matrixTrans), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, X,Xt,n,p); hipDeviceSynchronize(); hipLaunchKernelGGL(( matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, Xt,X,XXt,p,p,n); hipDeviceSynchronize(); //std::cout<<"XXt"<<std::endl; //print_file("xxt.dat",XXt,p,p); Inverse(XXt,Inv,p); hipDeviceSynchronize(); //std::cout<<"inv"<<std::endl; print_file("Inv.dat",Inv,p,p); //matrixMul<<<numBlocks,threadsPerBlock>>>(X,Xt,XXt,p,p,n); //hipDeviceSynchronize(); hipLaunchKernelGGL(( matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, X,Inv,H0,p,n,p); //matrixMul<<<numBlocks,threadsPerBlock>>>(Inv,Xt,H0,p,n,p); hipDeviceSynchronize(); //print_file("H0.dat",H0,p,n); hipLaunchKernelGGL(( matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, H0,Xt,H,n,n,p); //matrixMul<<<numBlocks,threadsPerBlock>>>(X,H0,H,n,n,p); hipDeviceSynchronize(); //print_file("H.dat",H,n,n); set_ones(J,n);hipLaunchKernelGGL(( matrixSum), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, H,J,Suma,1.,-1./n, n,n); hipDeviceSynchronize(); hipLaunchKernelGGL(( 
matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, Suma,Y,aux,1,n,p); hipDeviceSynchronize(); hipLaunchKernelGGL(( matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, Y,aux,J,1,1,n); hipDeviceSynchronize(); suma1=J[0]; //print(J,p,p); set_ones(J,n); set_iden(Id,n); set_zeros(Suma,n*n);hipLaunchKernelGGL(( matrixSum), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, Id,J,Suma,1.,-1./n, n, n); hipDeviceSynchronize(); //print(Id,p,p); //print(J,p,p); //print(Suma,p,p); set_zeros(aux,n); //print(Suma,n,n); //print(Y,1,n);hipLaunchKernelGGL(( matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, Suma,Y,aux,1,n,n); hipDeviceSynchronize(); //print(aux,1,n); //print(Y,1,p); //print(aux,1,p);hipLaunchKernelGGL(( matrixMul), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, Y,aux,J,1,1,n); hipDeviceSynchronize(); suma2=J[0]; std::cout<<suma1/suma2<<std::endl; hipFree(X); hipFree(Xt); hipFree(XXt); hipFree(Inv); hipFree(H0); hipFree(H); hipFree(J); hipFree(Suma); return 0; } void print(double * M,int cols,int rows){ for( int row = 0; row < rows; ++row ){ for( int col = 0; col < cols; ++col ) { std::cout<<M[col + row*cols]<<'\t'; } std::cout<<"\n"; } } __global__ void matrixTrans(double * M,double * MT, int rows, int cols) { double val=0; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < rows && col < cols){ val = M[col + row*cols]; MT[row + col*rows] = val; } } __global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < rows && col < cols){ for (int k = 0; k < cols2; k++){ C[row*cols+col]+=b[k*cols+col]*a[row*cols2+k]; } } } __global__ void nodiag_normalize(double *A, double *I, int nn, int i){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x< nn && y < nn){ if (x < nn && y < nn){ if (x == i && x!=y){ I[x*nn + y] /= A[i*nn + i]; A[x*nn + y] /= A[i*nn + i]; } } } } __global__ void diag_normalize(double *A, double *I, int nn, int i){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < nn && y < nn){ if (x == y && x == i){ I[x*nn + y] /= A[i*nn + i]; A[x*nn + y] /= A[i*nn + i]; } } } __global__ void gaussjordan(double *A, double *I, int nn, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x< nn && y < nn){ if (x < nn && y < nn){ if (x != i){ I[x*nn + y] -= I[i*nn + y] * A[x*nn + i]; if (y != i){ A[x*nn + y] -= A[i*nn + y] * A[x*nn + i]; } } } } } __global__ void set_zero(double *A, double *I, int nn, int i){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < nn && y < nn){ if (x != i){ if (y == i){ A[x*nn + y] = 0; } } } } void Inverse(double * A, double * I,int nn){ dim3 threadsPerBlock2(blocksize, blocksize); dim3 numBlocks2((nn + blocksize - 1) / blocksize, (nn + blocksize - 1) / blocksize); for (int i = 0; i<nn; i++){ nodiag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); diag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); gaussjordan << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); set_zero << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); } hipDeviceSynchronize(); } __global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y 
* blockDim.y + threadIdx.y; if (row < rows && col < cols){ Msum[row + col*rows] = alpha*M1[row+col*rows]+beta*M2[row+col*rows]; } } void print_file(char const * NameArch, const double * M,int cols,int rows){ std::ofstream File(NameArch); File.precision(16); for( int row = 0; row < rows; ++row ){ for( int col = 0; col < cols; ++col ) { File<<M[col + row*cols]<<'\t'; } File<<"\n"; } File.close(); } // Random number generator as per Abramowitz & Stegun // Source taken from: // http://c-faq.com/lib/gaussian.html double normal_rand(void){ static double U, V; static int phase = 0; double Z; if(phase == 0) { U = (rand() + 1.) / (RAND_MAX + 2.); V = rand() / (RAND_MAX + 1.); Z = sqrt(-2 * log(U)) * sin(2 * PI * V); } else Z = sqrt(-2 * log(U)) * cos(2 * PI * V); phase = 1 - phase; return Z; } void set_iden(double * M, int l){ for(int row=0;row<l;row++){ for(int col=0;col<l;col++){ M[row*l+col]=0; if (col==row){ M[row*l+col]=1; } } } } void set_ones(double * M, int l){ for(int row=0;row<l;row++){ for(int col=0;col<l;col++){ M[row*l+col]=1; } } } void set_zeros(double * M, int l){ for(int row=0;row<l;row++){ M[row]=0; } }
f3a018b95d3e47959010f96242898cf971cf1ea9.cu
//Library Definition #include <iostream> //cout #include <fstream> //Files #include <cstdlib> //atoi function //Constant Definition #define PI 3.141592654 #define blocksize 32 #define n 512 #define p 128 //Print matrix into standard output void print(double * M,int cols,int rows); /* DEVICE FUNCTIONS */ //Matrix transposition (Rows and Cols of M) __global__ void matrixTrans(double * M,double * MT, int rows, int cols); //Matrix multiplication(Cols and Rows of the result) __global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2); //INVERSION OF MATRICES ----GAUSS JORDAN METHOD -------- void Inverse(double * A, double * I,int nn); __global__ void nodiag_normalize(double *A, double *I, int nn, int i); __global__ void diag_normalize(double *A, double *I, int nn, int i); __global__ void gaussjordan(double *A, double *I, int nn, int i); __global__ void set_zero(double *A, double *I, int nn, int i); //Sum of Matrices __global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols); //Initialization of matrices, ones, zeros, identity void set_ones(double * M, int l); void set_zeros(double * M, int l); void set_iden(double * M, int l); //Print matrices into external files void print_file(char const * NameArch, const double * M,int cols,int rows); //Random numbers double normal_rand(void); /* MAIN FUNCTION */ int main(int argc, char * argv[]){ srand(atoi(argv[1])); //Seed recieved from terminal //int cols=p; //int raws=n; double *X, *Xt, *XXt, *Inv; double *H0,*H, *J, *Suma; double *Y,*Yt, *aux, *Id; int size0 = n * sizeof(double); int size2 = p * p * sizeof(double); int size3 = n * n * sizeof(double); int size4 = n * p * sizeof(double); cudaMallocManaged(&X,size4); cudaMallocManaged(&Xt,size4); cudaMallocManaged(&H0,size4); cudaMallocManaged(&H,size3); cudaMallocManaged(&J,size3); cudaMallocManaged(&Suma,size3); cudaMallocManaged(&XXt,size2); cudaMallocManaged(&Yt,size0); cudaMallocManaged(&Inv,size2); cudaMallocManaged(&Y,size0); cudaMallocManaged(&aux,size0); cudaMallocManaged(&Id,size3); double suma1=0,suma2=0; for(int row=0;row<n;row++){ for(int col=0;col<p;col++){ X[row*p+col]=normal_rand(); Y[col]=normal_rand(); } } print_file("x.dat",X,p,n); set_iden(Inv,p); dim3 threadsPerBlock(blocksize, blocksize); dim3 numBlocks((p + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize); matrixTrans<<<numBlocks,threadsPerBlock>>>(X,Xt,n,p); cudaDeviceSynchronize(); matrixMul<<<numBlocks,threadsPerBlock>>>(Xt,X,XXt,p,p,n); cudaDeviceSynchronize(); //std::cout<<"XXt"<<std::endl; //print_file("xxt.dat",XXt,p,p); Inverse(XXt,Inv,p); cudaDeviceSynchronize(); //std::cout<<"inv"<<std::endl; print_file("Inv.dat",Inv,p,p); //matrixMul<<<numBlocks,threadsPerBlock>>>(X,Xt,XXt,p,p,n); //cudaDeviceSynchronize(); matrixMul<<<numBlocks,threadsPerBlock>>>(X,Inv,H0,p,n,p); //matrixMul<<<numBlocks,threadsPerBlock>>>(Inv,Xt,H0,p,n,p); cudaDeviceSynchronize(); //print_file("H0.dat",H0,p,n); matrixMul<<<numBlocks,threadsPerBlock>>>(H0,Xt,H,n,n,p); //matrixMul<<<numBlocks,threadsPerBlock>>>(X,H0,H,n,n,p); cudaDeviceSynchronize(); //print_file("H.dat",H,n,n); set_ones(J,n); matrixSum<<<numBlocks,threadsPerBlock>>>(H,J,Suma,1.,-1./n, n,n); cudaDeviceSynchronize(); matrixMul<<<numBlocks,threadsPerBlock>>>(Suma,Y,aux,1,n,p); cudaDeviceSynchronize(); matrixMul<<<numBlocks,threadsPerBlock>>>(Y,aux,J,1,1,n); cudaDeviceSynchronize(); suma1=J[0]; //print(J,p,p); set_ones(J,n); set_iden(Id,n); set_zeros(Suma,n*n); 
matrixSum<<<numBlocks,threadsPerBlock>>>(Id,J,Suma,1.,-1./n, n, n); cudaDeviceSynchronize(); //print(Id,p,p); //print(J,p,p); //print(Suma,p,p); set_zeros(aux,n); //print(Suma,n,n); //print(Y,1,n); matrixMul<<<numBlocks,threadsPerBlock>>>(Suma,Y,aux,1,n,n); cudaDeviceSynchronize(); //print(aux,1,n); //print(Y,1,p); //print(aux,1,p); matrixMul<<<numBlocks,threadsPerBlock>>>(Y,aux,J,1,1,n); cudaDeviceSynchronize(); suma2=J[0]; std::cout<<suma1/suma2<<std::endl; cudaFree(X); cudaFree(Xt); cudaFree(XXt); cudaFree(Inv); cudaFree(H0); cudaFree(H); cudaFree(J); cudaFree(Suma); return 0; } void print(double * M,int cols,int rows){ for( int row = 0; row < rows; ++row ){ for( int col = 0; col < cols; ++col ) { std::cout<<M[col + row*cols]<<'\t'; } std::cout<<"\n"; } } __global__ void matrixTrans(double * M,double * MT, int rows, int cols) { double val=0; int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < rows && col < cols){ val = M[col + row*cols]; MT[row + col*rows] = val; } } __global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < rows && col < cols){ for (int k = 0; k < cols2; k++){ C[row*cols+col]+=b[k*cols+col]*a[row*cols2+k]; } } } __global__ void nodiag_normalize(double *A, double *I, int nn, int i){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x< nn && y < nn){ if (x < nn && y < nn){ if (x == i && x!=y){ I[x*nn + y] /= A[i*nn + i]; A[x*nn + y] /= A[i*nn + i]; } } } } __global__ void diag_normalize(double *A, double *I, int nn, int i){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < nn && y < nn){ if (x == y && x == i){ I[x*nn + y] /= A[i*nn + i]; A[x*nn + y] /= A[i*nn + i]; } } } __global__ void gaussjordan(double *A, double *I, int nn, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if ( x< nn && y < nn){ if (x < nn && y < nn){ if (x != i){ I[x*nn + y] -= I[i*nn + y] * A[x*nn + i]; if (y != i){ A[x*nn + y] -= A[i*nn + y] * A[x*nn + i]; } } } } } __global__ void set_zero(double *A, double *I, int nn, int i){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < nn && y < nn){ if (x != i){ if (y == i){ A[x*nn + y] = 0; } } } } void Inverse(double * A, double * I,int nn){ dim3 threadsPerBlock2(blocksize, blocksize); dim3 numBlocks2((nn + blocksize - 1) / blocksize, (nn + blocksize - 1) / blocksize); for (int i = 0; i<nn; i++){ nodiag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); diag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); gaussjordan << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); set_zero << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i); } cudaDeviceSynchronize(); } __global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < rows && col < cols){ Msum[row + col*rows] = alpha*M1[row+col*rows]+beta*M2[row+col*rows]; } } void print_file(char const * NameArch, const double * M,int cols,int rows){ std::ofstream File(NameArch); File.precision(16); for( int row = 0; row < rows; ++row ){ for( int col = 0; col < cols; ++col ) { File<<M[col + row*cols]<<'\t'; } File<<"\n"; } 
File.close(); } // Random number generator as per Abramowitz & Stegun // Source taken from: // http://c-faq.com/lib/gaussian.html double normal_rand(void){ static double U, V; static int phase = 0; double Z; if(phase == 0) { U = (rand() + 1.) / (RAND_MAX + 2.); V = rand() / (RAND_MAX + 1.); Z = sqrt(-2 * log(U)) * sin(2 * PI * V); } else Z = sqrt(-2 * log(U)) * cos(2 * PI * V); phase = 1 - phase; return Z; } void set_iden(double * M, int l){ for(int row=0;row<l;row++){ for(int col=0;col<l;col++){ M[row*l+col]=0; if (col==row){ M[row*l+col]=1; } } } } void set_ones(double * M, int l){ for(int row=0;row<l;row++){ for(int col=0;col<l;col++){ M[row*l+col]=1; } } } void set_zeros(double * M, int l){ for(int row=0;row<l;row++){ M[row]=0; } }
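Every array in the pair above lives in managed (unified) memory, which hipify maps one-to-one from cudaMallocManaged to hipMallocManaged; the host fills the buffers directly and only a device synchronize is needed before reading results back. A minimal sketch of that pattern with a placeholder kernel (addOne is illustrative, not from the program):

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void addOne(double *m, int len) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len) m[i] += 1.0;
}

int main() {
    const int len = 1 << 16;
    double *m = NULL;

    // Managed memory is visible to both host and device without explicit copies.
    cudaMallocManaged(&m, len * sizeof(double));      // hipify: hipMallocManaged(&m, len * sizeof(double));

    for (int i = 0; i < len; i++) m[i] = 0.0;         // host initialization, no cudaMemcpy

    addOne<<<(len + 255) / 256, 256>>>(m, len);
    cudaDeviceSynchronize();                          // required before the host touches m again

    printf("m[0] = %f\n", m[0]);                      // expected 1.000000
    cudaFree(m);
    return 0;
}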
a823f59f45cc08985227147819e31745a104d4a2.hip
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "column_filter.h"

namespace filter
{
    template void linearColumn<float3, uchar3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}

#endif /* CUDA_DISABLER */
a823f59f45cc08985227147819e31745a104d4a2.cu
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "column_filter.h"

namespace filter
{
    template void linearColumn<float3, uchar3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}

#endif /* CUDA_DISABLER */
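The only substantive hipify change in this pair is the stream type in the explicit template instantiation: cudaStream_t becomes hipStream_t, and the stream API maps one-to-one. A minimal sketch of launching work on an explicit stream (noopKernel is a placeholder, unrelated to linearColumn):

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void noopKernel() {}

int main() {
    cudaStream_t stream;
    cudaStreamCreate(&stream);             // hipify: hipStreamCreate(&stream);

    noopKernel<<<1, 1, 0, stream>>>();     // the fourth launch parameter selects the stream
    cudaStreamSynchronize(stream);         // hipify: hipStreamSynchronize(stream);

    cudaStreamDestroy(stream);             // hipify: hipStreamDestroy(stream);
    printf("done\n");
    return 0;
}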
4aa2026b8633e9964a6deb0f9294e8e17d4a4e52.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania #include "rasterizeKernels.h" #include "rasterizeTools.h" //#include "hip/hip_runtime.h" glm::vec3* framebuffer; fragment* depthbuffer; float* device_vbo; float* device_nbo; float* device_cbo; int* device_ibo; vertex* verticies; triangle* primitives; sphere* spheres; int* primitiveStageBuffer; uniforms* device_uniforms; int* binBuffers; int* bufferCounters; int* tileBuffers; int* tileBufferCounters; void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } //Handy dandy little hashing function that provides seeds for random number generation __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Writes a given fragment to a fragment buffer at a given location __host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; depthbuffer[index] = frag; } } //Reads a fragment from a given location in a fragment buffer __host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index]; }else{ fragment f; return f; } } __host__ __device__ int getDepthBufferIndex(int x, int y, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y && x>=0 && y >= 0) return (y*resolution.x) + x; return -1; } __host__ __device__ float getDepthFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index].depth; }else{ return 0; } } __device__ unsigned long long int fatomicMin(unsigned long long int * addr, unsigned long long int value) { unsigned long long ret = *addr; while(value < ret) { unsigned long long old = ret; if((ret = atomicCAS(addr, old, value)) == old) break; } return ret; } //Writes a given pixel to a pixel buffer at a given location __host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; framebuffer[index] = value; } } //Reads a pixel from a pixel buffer at a given location __host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return framebuffer[index]; }else{ return glm::vec3(0,0,0); } } //Kernel that clears a given pixel buffer with a given color __global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = color; } } //Kernel that clears a given fragment buffer depth only. 
Everything else is ignored because it will be overwritten later __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ buffer[index].depth= MAX_DEPTH; } } //Kernel that clears a given fragment buffer with a given fragment __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ fragment f = frag; f.position.x = x; f.position.y = y; buffer[index] = f; } } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } __global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, float* cbo, int cbosize, vertex* verticies, uniforms* u_variables, pipelineOpts opts){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/3){ vertex vOut; glm::vec4 vertexEyePos = glm::vec4(vbo[index*3+0],vbo[index*3+1],vbo[index*3+2],1.0); vertexEyePos = u_variables->viewTransform*u_variables->modelTransform*vertexEyePos; //Compute lighting vectors glm::vec4 eyeLightPos = u_variables->viewTransform*u_variables->lightPos; glm::vec4 eyeLightDir = (eyeLightPos - vertexEyePos); glm::vec4 halfVector = (eyeLightDir - vertexEyePos); //Normals are in eye space glm::vec4 vertexEyeNorm = glm::vec4(nbo[index*3+0],nbo[index*3+1],nbo[index*3+2],0.0); vertexEyeNorm = u_variables->viewTransform*u_variables->modelTransform*vertexEyeNorm; glm::vec3 vertexColor = glm::vec3(cbo[(index%3)*3+0],cbo[(index%3)*3+1],cbo[(index%3)*3+2]); //Apply perspective matrix and perspective division glm::vec4 pos = u_variables->perspectiveTransform*vertexEyePos; pos.x /= pos.w; pos.y /= pos.w; pos.z /= pos.w; //Emit vertex vOut.pos = glm::vec3(pos); vOut.eyeNormal = glm::normalize(glm::vec3(vertexEyeNorm)); vOut.eyeHalfVector = glm::normalize(glm::vec3(halfVector)); vOut.eyeLightDirection = glm::vec3(eyeLightDir); vOut.color = vertexColor; verticies[index] = vOut; } } //TODO: Implement primitive assembly __global__ void primitiveAssemblyKernel(vertex* verticies, int* ibo, int ibosize, triangle* primitives, int* primitiveStageBuffer, uniforms* u_variables, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int primitivesCount = ibosize/3; if(index<primitivesCount){ //3 floats per vert, 3 verts per triangle triangle primitive; //Load verticies int vertIndex = ibo[index*3+0]; primitive.v0 = verticies[vertIndex]; vertIndex = ibo[index*3+1]; primitive.v1 = verticies[vertIndex]; vertIndex = ibo[index*3+2]; primitive.v2 = verticies[vertIndex]; //Write back primitive primitives[index] = primitive; primitiveStageBuffer[index] = index;//Throw triangle into buffer } } 
__global__ void backfaceCulling(triangle* primitives, int* primitiveStageBuffer, int NPrimitives, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < NPrimitives) { int primIndex = primitiveStageBuffer[index]; if(primIndex >= 0 && primIndex < NPrimitives){ triangle tri = primitives[primIndex]; float ux = tri.v1.pos.x-tri.v0.pos.x; float uy = tri.v1.pos.y-tri.v0.pos.y; float vx = tri.v2.pos.x-tri.v0.pos.x; float vy = tri.v2.pos.y-tri.v0.pos.y; float facing = ux*vy-uy*vx; if(facing < 0.0) { //Backface. Cull it. primitiveStageBuffer[index] = -1; } } } } __global__ void totalClipping(triangle* primitives, int* primitiveStageBuffer, int NPrimitives, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < NPrimitives) { int primIndex = primitiveStageBuffer[index]; if(primIndex >= 0 && primIndex < NPrimitives){ triangle tri = primitives[primIndex]; glm::vec3 minpoint, maxpoint; getAABBForTriangle(tri, minpoint,maxpoint); if(!isAABBInClipSpace(minpoint, maxpoint)) { //Backface. Cull it. primitiveStageBuffer[index] = -1; } } } } //TODO: Do this a lot more efficiently and in parallel __global__ void rasterizationKernel(triangle* primitives, int* primitiveStageBuffer, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, uniforms* u_variables, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ int triIndex = primitiveStageBuffer[index]; if(triIndex >= 0){ //For each primitive //Load triangle localy transformTriToScreenSpace(primitives[triIndex], resolution); triangle tri = primitives[triIndex]; //AABB for triangle glm::vec3 minPoint; glm::vec3 maxPoint; getAABBForTriangle(tri, minPoint, maxPoint); //Compute pixel range //Do some per-fragment clipping and restrict to screen space int minX = glm::max(glm::floor(minPoint.x),0.0f); int maxX = glm::min(glm::ceil(maxPoint.x),resolution.x); int minY = glm::max(glm::floor(minPoint.y),0.0f); int maxY = glm::min(glm::ceil(maxPoint.y),resolution.y); fragment frag; frag.primitiveIndex = index; //TODO: Do something more efficient than this for(int x = minX; x <= maxX; ++x) { for(int y = minY; y <= maxY; ++y) { int dbindex = getDepthBufferIndex(x,y,resolution); if(dbindex < 0) continue; frag.position.x = x; frag.position.y = y; glm::vec3 bCoords = calculateBarycentricCoordinate(tri, glm::vec2(x,y)); if(isBarycentricCoordInBounds(bCoords)) { //Blend values. frag.depth = tri.v0.pos.z*bCoords.x+tri.v1.pos.z*bCoords.y+tri.v2.pos.z*bCoords.z; if(frag.depth > 0.0f && frag.depth < 1.0f) { //Only continue if pixel is in screen. 
frag.color = tri.v0.color*bCoords.x+tri.v1.color*bCoords.y+tri.v2.color*bCoords.z; frag.normal = glm::normalize(tri.v0.eyeNormal*bCoords.x+tri.v1.eyeNormal*bCoords.y+tri.v2.eyeNormal*bCoords.z); frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); fatomicMin(&(depthbuffer[dbindex].depthPrimTag),frag.depthPrimTag); if(frag.depthPrimTag == depthbuffer[dbindex].depthPrimTag)//If this is true, we won the race condition writeToDepthbuffer(x,y,frag, depthbuffer,resolution); } } } } } } } __host__ __device__ void depthFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { float depth = depthbuffer[index].depth; if(depth < 1.0f) depthbuffer[index].color = glm::vec3(1.0f-depth); } __host__ __device__ void ambientFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { //Do nothing. Interpolated color is assumed to be right } __host__ __device__ void blinnPhongFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { //TODO: Implement light color shading fragment frag = depthbuffer[index]; glm::vec3 baseColor = frag.color; frag.color *= u_variables->blinnPhongParams.x;//Ambient term always present float NdotL = glm::max(glm::dot(frag.normal,frag.lightDir),0.0f); if (NdotL > 0.0f) { glm::vec3 diffuseColor = u_variables->diffuseColor; if(opts.showTriangleColors) diffuseColor = baseColor; frag.color += u_variables->blinnPhongParams.y * u_variables->lightColor * diffuseColor * NdotL; float NdotHV = glm::max(glm::dot(frag.normal,frag.halfVector),0.0f); glm::vec3 specularColor = u_variables->specularColor; if(opts.showTriangleColors) specularColor = baseColor; frag.color += u_variables->blinnPhongParams.z * u_variables->lightColor * specularColor * glm::pow(NdotHV, u_variables->shininess); } depthbuffer[index] = frag; } __host__ __device__ void normalFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { glm::vec3 color = depthbuffer[index].normal; color.x = abs(color.x); color.y = abs(color.y); color.z = abs(color.z); depthbuffer[index].color = color; } __global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, uniforms* u_variables, pipelineOpts opts) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ if(depthbuffer[index].depth < MAX_DEPTH){ //normalFSImpl(depthbuffer, index, u_variables, opts); switch(opts.fShaderProgram) { case DEPTH_SHADING: depthFSImpl(depthbuffer, index, u_variables, opts); break; case AMBIENT_LIGHTING: ambientFSImpl(depthbuffer, index, u_variables, opts); break; case NORMAL_SHADING: normalFSImpl(depthbuffer, index, u_variables, opts); break; case BLINN_PHONG_SHADING: blinnPhongFSImpl(depthbuffer, index, u_variables, opts); break; } } } } //Writes fragment colors to the framebuffer __global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ if(depthbuffer[index].depth < MAX_DEPTH){//Only framebuffer[index] = depthbuffer[index].color; } } } __global__ void binRasterizationKernel(triangle* primitives, int* 
primitiveStageBuffer, int NPrimitives, int* bufferCounters, int* binBuffers, int binBufferSize, glm::vec2 resolution, glm::vec2 binDims, pipelineOpts opts) { extern __shared__ int s[]; int *sBufferCounters = s; int numBins = binDims.x*binDims.y; int *sBatchNum = &s[numBins]; //threadIdx.x is id within batch int indexInBatch = threadIdx.x; int numBatchesPerBlock = blockDim.x; int binWidth = ceil(resolution.x/binDims.x); int binHeight = ceil(resolution.y/binDims.y); int indexInBlock = threadIdx.x+threadIdx.y*blockDim.x; //Initialize counters if(indexInBlock < numBins) sBufferCounters[indexInBlock] = 0; if(indexInBlock < blockDim.x) sBatchNum[indexInBlock] = 0; __syncthreads(); while(sBatchNum[indexInBatch] < numBatchesPerBlock) { //Get a batch int batchId = atomicAdd(&sBatchNum[indexInBatch], 1); if(batchId < numBatchesPerBlock){ int stageBufferIndex = indexInBatch + blockDim.x*(batchId*gridDim.x+blockIdx.x); if(stageBufferIndex < NPrimitives) { int triangleIndex = primitiveStageBuffer[stageBufferIndex]; if(triangleIndex >= 0 && triangleIndex < NPrimitives){ glm::vec3 minpoint,maxpoint; transformTriToScreenSpace(primitives[triangleIndex], resolution); getAABBForTriangle(primitives[triangleIndex], minpoint, maxpoint); for(int x = 0; x < binDims.x; ++x) { for(int y = 0; y < binDims.y; ++y) { if(isAABBInBin(minpoint, maxpoint, x*binWidth, (x+1)*binWidth, y*binHeight, (y+1)*binHeight)) { int binIndex = x+y*binDims.x; int bufLoc = atomicAdd(&sBufferCounters[binIndex], 1); if(bufLoc < binBufferSize){ int binBufferIndex = bufLoc + binIndex*binBufferSize + blockIdx.x*(numBins*binBufferSize); binBuffers[binBufferIndex] = triangleIndex; }else{ //ERROR Overflow } } } } } } } } __syncthreads(); if(indexInBlock < numBins) bufferCounters[numBins*blockIdx.x + indexInBlock] = sBufferCounters[indexInBlock]; } __global__ void coarseRasterizationKernel(triangle* primitives, int NPrimitives, glm::vec2 resolution, int* binBufferCounters, int* binBuffers, int binBufferSize, int* tileBuffers, int tileBufferSize, int* tileBufferCounters, int numTilesX, int numTilesY, int tileSize, int numBinBlocks) { extern __shared__ int s[]; int* sTriIndexBuffer = s; int numTilesInBin = blockDim.x*blockDim.y; glm::vec4* sAABBBuffer = (glm::vec4*) &sTriIndexBuffer[numTilesInBin]; int binIndex = blockIdx.x + blockIdx.y*gridDim.x; int tileXIndex = blockIdx.x*blockDim.x+threadIdx.x;//Bin.X*tilesPerBin.x+tileXInBinIndex int tileYIndex = blockIdx.y*blockDim.y+threadIdx.y;//Bin.Y*tilesPerBin.y+tileYInBinIndex int numBins = gridDim.x*gridDim.y; int indexInBin = threadIdx.x+threadIdx.y*blockDim.x; int tileBufferCounter = 0; int tileBufferOffset = tileBufferSize*(tileXIndex+tileYIndex*numTilesX); glm::vec4 tileAABB = glm::vec4(tileXIndex*tileSize, tileYIndex*tileSize, (tileXIndex+1)*tileSize, (tileYIndex+1)*tileSize); //Prefetch triangles and calculate bounding boxes in compact fashion //Iterate by binRaster Block for(int binBlock = 0; binBlock < numBinBlocks; ++binBlock) { int bufferOffset = 0; int bufferSize = binBufferCounters[binIndex+binBlock*numBins]; do{ if(indexInBin < bufferSize) { int binBufferOffset = binIndex*binBufferSize + binBlock*(numBins*binBufferSize); int triIndex = binBuffers[(bufferOffset + indexInBin) + binBufferOffset]; sTriIndexBuffer[indexInBin] = triIndex; glm::vec4 mXmYMXMY(0,0,0,0); if(triIndex >= 0 && triIndex < NPrimitives) getCompact2DAABBForTriangle(primitives[triIndex], mXmYMXMY); sAABBBuffer[indexInBin] = mXmYMXMY; }else{ sTriIndexBuffer[indexInBin] = -1; } //TODO: Do this more safely in shared memory. 
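			//Each pass of this do/while: the block cooperatively stages up to numTilesInBin
			//triangle indices and their compact 2D AABBs in shared memory, then (after the
			//__syncthreads below) every thread, which owns one tile, scans the staged batch
			//and appends the triangles that overlap its tile to that tile's queue.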
bufferOffset = bufferOffset + numTilesInBin; bufferSize = bufferSize - numTilesInBin; __syncthreads();//Prefetch complete, wait for everyone else to catch up //For each triangle, put in correct tile for(int tri = 0; tri < numTilesInBin; ++tri) { if(sTriIndexBuffer[tri] >= 0){ if(doCompactAABBsintersect(tileAABB, sAABBBuffer[tri])) { if(tileBufferCounter < tileBufferSize){ tileBuffers[tileBufferOffset + tileBufferCounter] = sTriIndexBuffer[tri]; tileBufferCounter++; }else{ //ERROR Overflow } } } } __syncthreads(); }while(bufferSize > 0); } //Write out tile counts tileBufferCounters[tileXIndex+tileYIndex*numTilesX] = tileBufferCounter; } //TODO: Do this a lot more efficiently and in parallel __global__ void fineRasterizationKernel(triangle* primitives, int NPrimitives, glm::vec2 resolution, int* tileBuffers, int tileBufferSize, int* tileBufferCounters, fragment* depthbuffer) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int tileIndex = blockIdx.x+blockIdx.y*gridDim.x; int pixelX = blockIdx.x*blockDim.x+threadIdx.x;//TileX*tilesize+threadIdx.x int pixelY = blockIdx.y*blockDim.y+threadIdx.y;//TileY*tilesize+threadIdx.y if(pixelX >= 0 && pixelX < resolution.x && pixelY >= 0 && pixelY < resolution.y) { //Pixel is on screen. //Each thread has exclusive access to this location in the depth buffer so no complex atomics needed. int dbIndex = pixelX+pixelY*resolution.x;//Depth buffer location //tileXIndex+tileYIndex*numTilesX int tileIndex = blockIdx.x+blockIdx.y*gridDim.x; int triCount = tileBufferCounters[tileIndex]; //For each triangle in queue for(int t = 0; t < triCount; ++t) { //(tileIndex)*tileBufferSize+tri; int triIndex = tileBuffers[tileIndex*tileBufferSize+t]; if(triIndex >= 0 && triIndex < NPrimitives){ triangle tri = primitives[triIndex]; //Already in screen space (from bin rasterizer) glm::vec3 minPoint; glm::vec3 maxPoint; getAABBForTriangle(tri, minPoint, maxPoint); if(isPixelInAABB(pixelX, pixelY, minPoint, maxPoint)) { fragment frag; frag.primitiveIndex = index; frag.position.x = pixelX; frag.position.y = pixelY; glm::vec3 bCoords = calculateBarycentricCoordinate(tri, glm::vec2(pixelX,pixelY)); if(isBarycentricCoordInBounds(bCoords)) { frag.depth = tri.v0.pos.z*bCoords.x+tri.v1.pos.z*bCoords.y+tri.v2.pos.z*bCoords.z; frag.primitiveIndex = triIndex; if(frag.depth > 0.0f && frag.depth < 1.0f) { //Depth test if(frag.depth < depthbuffer[dbIndex].depth) { //Only continue if depth test passes. 
frag.color = tri.v0.color*bCoords.x+tri.v1.color*bCoords.y+tri.v2.color*bCoords.z; frag.normal = glm::normalize(tri.v0.eyeNormal*bCoords.x+tri.v1.eyeNormal*bCoords.y+tri.v2.eyeNormal*bCoords.z); frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); writeToDepthbuffer(pixelX,pixelY, frag, depthbuffer,resolution); } } } } } } } } void binRasterizer(int NPrimitives, glm::vec2 resolution, pipelineOpts opts) { glm::vec2 binDims = glm::vec2(5,5); //Tuning params int binBufferSize = 2<<6; int batchSize = 32;//One batch per warp int numBatches = ceil(NPrimitives/float(batchSize)); int numBlocks = max(ceil(numBatches*batchSize/float(1024)), ceil(numBatches*batchSize*26/float(8192)));//26 is number of registers for kernel int batchesPerBlock = ceil(numBatches/float(numBlocks)); //Allocate bin buffers hipMalloc((void**) &binBuffers, numBlocks*binDims.x*binDims.y*(binBufferSize)*sizeof(int)); hipMalloc((void**) &bufferCounters, numBlocks*binDims.x*binDims.y*sizeof(int)); dim3 blockDims(batchSize, batchesPerBlock); dim3 gridDims(numBlocks); int Ns = (binDims.x*binDims.y+batchSize)*sizeof(int); hipLaunchKernelGGL(( binRasterizationKernel), dim3(gridDims),dim3(blockDims),Ns, 0, primitives, primitiveStageBuffer, NPrimitives, bufferCounters, binBuffers, binBufferSize, resolution, binDims, opts); //==============COARSE RASTER=================== int tilesize = 8;//8x8 int tileBufferSize = 2<<6; int numTilesX = ceil(resolution.x/float(tilesize)); int numTilesY = ceil(resolution.y/float(tilesize)); int numTilesPerBinX = ceil(numTilesX/float(binDims.x)); int numTilesPerBinY = ceil(numTilesY/float(binDims.y)); hipMalloc((void**) &tileBuffers, numTilesX*numTilesY*tileBufferSize*sizeof(int)); hipMalloc((void**) &tileBufferCounters, numTilesX*numTilesY*sizeof(int)); dim3 coarseGridDims(binDims.x, binDims.y); dim3 coarseBlockDims(numTilesPerBinX,numTilesPerBinY); Ns = (sizeof(glm::vec4)+sizeof(int))*numTilesPerBinX*numTilesPerBinY; hipLaunchKernelGGL(( coarseRasterizationKernel), dim3(coarseGridDims),dim3(coarseBlockDims),Ns, 0, primitives, NPrimitives, resolution, bufferCounters, binBuffers, binBufferSize, tileBuffers, tileBufferSize, tileBufferCounters, numTilesX, numTilesY, tilesize, numBlocks); hipFree(binBuffers);//Free previous buffer //int* debug = (int*)malloc(numTilesX*numTilesY*sizeof(int)); //hipMemcpy( debug, tileBufferCounters, numTilesX*numTilesY*sizeof(int), hipMemcpyDeviceToHost); //free(debug); //==============FINE RASTER===================== dim3 tileDims(tilesize,tilesize); dim3 numTiles(numTilesX,numTilesY); Ns = 0; hipLaunchKernelGGL(( fineRasterizationKernel), dim3(numTiles),dim3(tileDims),Ns, 0, primitives, NPrimitives, resolution, tileBuffers, tileBufferSize, tileBufferCounters, depthbuffer); hipFree(tileBufferCounters); hipFree(tileBuffers); hipFree(bufferCounters); } __global__ void countPixelsPerTri(triangle* primitives, int* primitiveStageBuffer, int primitivesCount, glm::vec2 resolution, int* pixelsPerTri) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ pixelsPerTri[index] = -1; int triIndex = primitiveStageBuffer[index]; if(triIndex >= 0){ //For each primitive //Load triangle localy triangle tri = primitives[triIndex]; int pixelsInside=0; //AABB for triangle glm::vec3 minPoint; glm::vec3 maxPoint; //transformTriToScreenSpace(tri, resolution); // 
Already done getAABBForTriangle(tri, minPoint, maxPoint); //Compute pixel range //Do some per-fragment clipping and restrict to screen space int minX = glm::max(glm::floor(minPoint.x),0.0f); int maxX = glm::min(glm::ceil(maxPoint.x),resolution.x); int minY = glm::max(glm::floor(minPoint.y),0.0f); int maxY = glm::min(glm::ceil(maxPoint.y),resolution.y); for(int x = minX; x <= maxX; ++x) { for(int y = minY; y <= maxY; ++y) { int dbindex = getDepthBufferIndex(x,y,resolution); if(dbindex < 0) continue; glm::vec3 bCoords = calculateBarycentricCoordinate(tri, glm::vec2(x,y)); if(isBarycentricCoordInBounds(bCoords)) { pixelsInside++; } } } pixelsPerTri[triIndex] = pixelsInside; } } } int* pixelsPerTri; void calculateMetrics(PerformanceMetrics &metrics, triangle* primitives, int* primitiveStageBuffer, int NPrimitives, glm::vec2 resolution, dim3 primitiveBlocks, dim3 tileSize, pipelineOpts opts) { pixelsPerTri = NULL; hipMalloc((void**)&pixelsPerTri, NPrimitives*sizeof(int)); hipLaunchKernelGGL(( countPixelsPerTri), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, primitiveStageBuffer, NPrimitives, resolution, pixelsPerTri); int* pixelCounts = new int[NPrimitives]; hipMemcpy( pixelCounts, pixelsPerTri, NPrimitives*sizeof(int), hipMemcpyDeviceToHost); //TODO Assemble stats in CPU for simplicity. float avgPixelsPerTri = 0.0f; int maxPixelsPerTri = 0; int NPrimitivesRendered = NPrimitives; for(int i = 0; i < NPrimitives; i++) { maxPixelsPerTri = max(maxPixelsPerTri, pixelCounts[i]); if(pixelCounts[i] < 0) { NPrimitivesRendered--; }else{ avgPixelsPerTri += pixelCounts[i]; } } metrics.NumTriangles = NPrimitives; if(NPrimitives != 0){ metrics.avgPixelsPerTri = avgPixelsPerTri/NPrimitivesRendered; metrics.maxPixelsPerTri = maxPixelsPerTri;//Record worst case } metrics.NumTrianglesRastered = NPrimitivesRendered; delete pixelCounts; hipFree(pixelsPerTri); } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* nbo, int nbosize, float* cbo, int cbosize, int* ibo, int ibosize, uniforms u_variables, pipelineOpts opts, PerformanceMetrics &metrics) { // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize))); //set up framebuffer framebuffer = NULL; hipMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3)); //set up depthbuffer depthbuffer = NULL; hipMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment)); //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, framebuffer, glm::vec3(0,0,0)); fragment frag; frag.color = glm::vec3(0.0f); frag.normal = glm::vec3(0.0f); frag.position = glm::vec2(0.0f,0.0f); frag.depth = MAX_DEPTH; hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer); //------------------------------ //memory stuff //------------------------------ primitives = NULL; hipMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle)); primitiveStageBuffer = NULL; hipMalloc((void**)&primitiveStageBuffer, (ibosize/3)*sizeof(int)); verticies = NULL; hipMalloc((void**)&verticies, (vbosize)*sizeof(vertex)); device_uniforms = NULL; 
hipMalloc((void**)&device_uniforms, sizeof(uniforms)); hipMemcpy( device_uniforms, &u_variables, sizeof(uniforms), hipMemcpyHostToDevice); device_ibo = NULL; hipMalloc((void**)&device_ibo, ibosize*sizeof(int)); hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice); device_vbo = NULL; hipMalloc((void**)&device_vbo, vbosize*sizeof(float)); hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice); device_nbo = NULL; hipMalloc((void**)&device_nbo, nbosize*sizeof(float)); hipMemcpy( device_nbo, nbo, nbosize*sizeof(float), hipMemcpyHostToDevice); device_cbo = NULL; hipMalloc((void**)&device_cbo, cbosize*sizeof(float)); hipMemcpy( device_cbo, cbo, cbosize*sizeof(float), hipMemcpyHostToDevice); tileSize = 32; int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize)); //------------------------------ //vertex shader //------------------------------ hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_nbo, nbosize, device_cbo, cbosize, verticies, device_uniforms, opts); checkCUDAError("Kernel failed VS!"); hipDeviceSynchronize(); //------------------------------ //primitive assembly //------------------------------ primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize)); hipLaunchKernelGGL(( primitiveAssemblyKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, verticies, device_ibo, ibosize, primitives, primitiveStageBuffer, device_uniforms, opts); hipDeviceSynchronize(); checkCUDAError("Kernel failed PA!"); int NPrimitives = ibosize/3; if(opts.backfaceCulling) { hipLaunchKernelGGL(( backfaceCulling), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, primitiveStageBuffer, NPrimitives, opts); } if(opts.totalClipping) { hipLaunchKernelGGL(( totalClipping), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, primitiveStageBuffer, NPrimitives, opts); } //------------------------------ //rasterization //------------------------------ LARGE_INTEGER beginTime; QueryPerformanceCounter( &beginTime ); // Code to measure ... 
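	//opts.rasterMode selects between the one-thread-per-triangle NAIVE kernel and the
	//three-stage bin -> coarse -> fine path implemented in binRasterizer().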
if(opts.rasterMode == NAIVE) { hipLaunchKernelGGL(( rasterizationKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, primitiveStageBuffer, NPrimitives, depthbuffer, resolution, device_uniforms, opts); }else if(opts.rasterMode == BIN){ binRasterizer(NPrimitives, resolution, opts); } hipDeviceSynchronize(); checkCUDAError("Kernel failed Raster!"); LARGE_INTEGER endTime; QueryPerformanceCounter( &endTime ); LARGE_INTEGER timerFreq; QueryPerformanceFrequency( &timerFreq ); const double freq = 1.0f / timerFreq.QuadPart; const double timeSeconds = ( endTime.QuadPart - beginTime.QuadPart )* freq;; metrics.rasterTimeSeconds = timeSeconds; if(opts.recordMetrics){ //Calculate metrics //TODO: Add more metrics calculateMetrics(metrics, primitives, primitiveStageBuffer, NPrimitives, resolution, primitiveBlocks, tileSize, opts); } //------------------------------ //fragment shader //------------------------------ hipLaunchKernelGGL(( fragmentShadeKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, depthbuffer, resolution, device_uniforms, opts); hipDeviceSynchronize(); checkCUDAError("Kernel failed FS!"); //------------------------------ //write fragments to framebuffer //------------------------------ hipLaunchKernelGGL(( render), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer, framebuffer); hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, resolution, framebuffer); hipDeviceSynchronize(); kernelCleanup(); checkCUDAError("Kernel failed!"); } //close to vertex __global__ void sphereCenterShadeKernel(float* vbo, int vbosize, float* cbo, int cbosize, sphere* sp, uniforms* u_variables, pipelineOpts opts){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/4){ sphere vOut; glm::vec4 vertexEyePos = glm::vec4(vbo[index*4+0],vbo[index*4+1],vbo[index*4+2],1.0); vertexEyePos = u_variables->viewTransform*u_variables->modelTransform*vertexEyePos; //Compute lighting vectors /*glm::vec4 eyeLightPos = u_variables->viewTransform*u_variables->lightPos; glm::vec4 eyeLightDir = (eyeLightPos - vertexEyePos); glm::vec4 halfVector = (eyeLightDir - vertexEyePos);*/ //Normals are in eye space /* glm::vec4 vertexEyeNorm = glm::vec4(nbo[index*3+0],nbo[index*3+1],nbo[index*3+2],0.0); vertexEyeNorm = u_variables->viewTransform*u_variables->modelTransform*vertexEyeNorm; glm::vec3 vertexColor = glm::vec3(cbo[(index%3)*3+0],cbo[(index%3)*3+1],cbo[(index%3)*3+2]);*/ //Apply perspective matrix and perspective division glm::vec4 pos = u_variables->perspectiveTransform*vertexEyePos; pos.x /= pos.w; pos.y /= pos.w; pos.z /= pos.w; vOut.r = vbo[index*4+3] / pos.w; //Emit vertex vOut.center = glm::vec3(pos); //vOut.eyeLightDirection = glm::vec3(eyeLightDir); vOut.color = glm::vec3(cbo[3*index], cbo[3*index + 1], cbo[3*index + 2]); sp[index] = vOut; } } //TODO: Do this a lot more efficiently and in parallel __global__ void rasterizationKernelSphere(sphere* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, uniforms* u_variables, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ if(index >= 0){ //For each primitive //Load triangle localy int rx; int ry; transformSphereToScreenSpace(primitives[index], resolution, rx, ry); sphere sp = primitives[index]; //AABB for triangle glm::vec3 minPoint; glm::vec3 maxPoint; getAABBForSphere(sp, minPoint, maxPoint, rx, ry); //Compute pixel range //Do some per-fragment clipping and restrict to screen space 
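			//The loop below clamps the sphere's screen-space AABB to the viewport, then accepts
			//a pixel when its squared distance to the projected center is less than rx*ry, i.e.
			//the footprint is approximated by a circle of radius sqrt(rx*ry) rather than a true
			//ellipse, and the fragment depth is taken flat at the projected center's z.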
int minX = max(minPoint.x,0.0f); int maxX = min(maxPoint.x,resolution.x); int minY = max(minPoint.y,0.0f); int maxY = min(maxPoint.y,resolution.y); //minX = 600, maxX = 800; //minY = 155, maxY = 214; fragment frag; frag.primitiveIndex = index; //TODO: Do something more efficient than this float rsq = sp.r * sp.r; for(int x = minX; x <= maxX; ++x) { for(int y = minY; y <= maxY; ++y) { int dbindex = getDepthBufferIndex(x,y,resolution); if(dbindex < 0) continue; frag.position.x = x; frag.position.y = y; glm::vec2 dis2 = glm::vec2(x,y) - glm::vec2(sp.center.x, sp.center.y); float dis2s = dis2.x * dis2.x + dis2.y * dis2.y; if(dis2s < rx * ry) { //Blend values. frag.depth = sp.center.z;// - glm::sqrt(rsq - dis * dis); if(frag.depth > 0.0f && frag.depth < 1.0f) { //Only continue if pixel is in screen. //TODO frag.normal = glm::normalize(glm::vec3(x, y, frag.depth) - sp.center); frag.color = glm::vec3(1,0,0); //frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); //frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); fatomicMin(&(depthbuffer[dbindex].depthPrimTag),frag.depthPrimTag); if(frag.depthPrimTag == depthbuffer[dbindex].depthPrimTag)//If this is true, we won the race condition writeToDepthbuffer(x,y,frag, depthbuffer,resolution); } } //int dbindex = getDepthBufferIndex(x,y,resolution); //frag.depth = sp.center.z;// - glm::sqrt(rsq - dis * dis); // if(frag.depth > 0.0f && frag.depth < 1.0f) // { // //Only continue if pixel is in screen. // //TODO // frag.color = glm::vec3(5,0,0); // frag.normal = glm::normalize(glm::vec3(x, y, frag.depth) - sp.center); // //frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); // //frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); // fatomicMin(&(depthbuffer[dbindex].depthPrimTag),frag.depthPrimTag); // if(frag.depthPrimTag == depthbuffer[dbindex].depthPrimTag)//If this is true, we won the race condition // writeToDepthbuffer(x,y,frag, depthbuffer,resolution); // } } } } } } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaSphereRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, uniforms u_variables, pipelineOpts opts, PerformanceMetrics &metrics) { // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize))); //set up framebuffer framebuffer = NULL; hipMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3)); //set up depthbuffer depthbuffer = NULL; hipMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment)); //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, framebuffer, glm::vec3(0,0,0)); fragment frag; frag.color = glm::vec3(0.0f); frag.normal = glm::vec3(0.0f); frag.position = glm::vec2(0.0f,0.0f); frag.depth = MAX_DEPTH; hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer); //------------------------------ 
//memory stuff //------------------------------ //primitives = NULL; //hipMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle)); //primitiveStageBuffer = NULL; //hipMalloc((void**)&primitiveStageBuffer, (ibosize/3)*sizeof(int)); spheres = NULL; hipMalloc((void**)&spheres, (vbosize)*sizeof(sphere)); device_uniforms = NULL; hipMalloc((void**)&device_uniforms, sizeof(uniforms)); hipMemcpy( device_uniforms, &u_variables, sizeof(uniforms), hipMemcpyHostToDevice); /*device_ibo = NULL; hipMalloc((void**)&device_ibo, ibosize*sizeof(int)); hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice);*/ device_vbo = NULL; hipMalloc((void**)&device_vbo, vbosize*sizeof(float)); hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice); /*device_nbo = NULL; hipMalloc((void**)&device_nbo, nbosize*sizeof(float)); hipMemcpy( device_nbo, nbo, nbosize*sizeof(float), hipMemcpyHostToDevice);*/ device_cbo = NULL; hipMalloc((void**)&device_cbo, cbosize*sizeof(float)); hipMemcpy( device_cbo, cbo, cbosize*sizeof(float), hipMemcpyHostToDevice); tileSize = 32; int primitiveBlocks = ceil(((float)vbosize/4)/((float)tileSize)); //------------------------------ //vertex shader //------------------------------ hipLaunchKernelGGL(( sphereCenterShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_cbo, cbosize, spheres, device_uniforms, opts); checkCUDAError("Kernel failed VS!"); hipDeviceSynchronize(); //------------------------------ //primitive assembly //------------------------------ //seems no need for sphere //primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize)); //primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(verticies, device_ibo, ibosize, primitives, primitiveStageBuffer, device_uniforms, opts); //hipDeviceSynchronize(); //checkCUDAError("Kernel failed PA!"); //no need for backface culling /*int NPrimitives = ibosize/3; if(opts.backfaceCulling) { hipLaunchKernelGGL(( backfaceCulling), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, primitiveStageBuffer, NPrimitives, opts); } if(opts.totalClipping) { hipLaunchKernelGGL(( totalClipping), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, primitiveStageBuffer, NPrimitives, opts); }*/ //------------------------------ //rasterization //------------------------------ LARGE_INTEGER beginTime; QueryPerformanceCounter( &beginTime ); // Code to measure ... 
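	//Only the NAIVE path is wired up for spheres; the BIN branch below is stubbed out
	//(its binRasterizer call is commented out), so opts.rasterMode == BIN renders nothing here.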
if(opts.rasterMode == NAIVE) { hipLaunchKernelGGL(( rasterizationKernelSphere), dim3(primitiveBlocks), dim3(tileSize), 0, 0, spheres, vbosize / 4, depthbuffer, resolution, device_uniforms, opts); }else if(opts.rasterMode == BIN){ //binRasterizer(NPrimitives, resolution, opts); } hipDeviceSynchronize(); checkCUDAError("Kernel failed Raster!"); LARGE_INTEGER endTime; QueryPerformanceCounter( &endTime ); LARGE_INTEGER timerFreq; QueryPerformanceFrequency( &timerFreq ); const double freq = 1.0f / timerFreq.QuadPart; const double timeSeconds = ( endTime.QuadPart - beginTime.QuadPart )* freq;; metrics.rasterTimeSeconds = timeSeconds; if(opts.recordMetrics){ //Calculate metrics //TODO: Add more metrics //calculateMetrics(metrics, primitives, primitiveStageBuffer, vbosize, resolution, primitiveBlocks, tileSize, opts); } //------------------------------ //fragment shader //------------------------------ hipLaunchKernelGGL(( fragmentShadeKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, depthbuffer, resolution, device_uniforms, opts); hipDeviceSynchronize(); checkCUDAError("Kernel failed FS!"); //------------------------------ //write fragments to framebuffer //------------------------------ hipLaunchKernelGGL(( render), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer, framebuffer); hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, resolution, framebuffer); hipDeviceSynchronize(); kernelCleanup(); checkCUDAError("Kernel failed!"); } void kernelCleanup(){ hipFree( primitives ); hipFree( device_vbo ); hipFree( device_nbo); hipFree( device_cbo ); hipFree( device_ibo ); hipFree( framebuffer ); hipFree( depthbuffer ); hipFree( verticies ); hipFree( device_uniforms); hipFree(primitiveStageBuffer); }
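// ---------------------------------------------------------------------------
// Illustrative sketch (assumptions flagged, not part of the original sources):
// the naive and sphere rasterizers above resolve the per-pixel depth race with
// fatomicMin() on fragment::depthPrimTag, a 64-bit key that must order
// fragments front-to-back. The actual layout of depthPrimTag is defined in
// rasterizeTools.h and is not visible in this file; the helpers below show ONE
// possible packing, assuming depth is a non-negative float (whose IEEE-754 bit
// pattern is then monotonic) stored in the high 32 bits with the primitive
// index in the low 32 bits. The helper names are hypothetical.
// ---------------------------------------------------------------------------
__device__ inline unsigned long long int packDepthPrimTag(float depth, int primitiveIndex)
{
	//For depth >= 0, reinterpreting the float bits as an unsigned int preserves ordering,
	//so a smaller depth always yields a smaller 64-bit key regardless of the index bits.
	unsigned long long int depthBits = (unsigned int)__float_as_int(depth);
	return (depthBits << 32) | (unsigned long long int)(unsigned int)primitiveIndex;
}

__device__ inline int primIndexFromDepthPrimTag(unsigned long long int tag)
{
	return (int)(tag & 0xFFFFFFFFull);
}

__device__ inline float depthFromDepthPrimTag(unsigned long long int tag)
{
	return __int_as_float((int)(tag >> 32));
}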
4aa2026b8633e9964a6deb0f9294e8e17d4a4e52.cu
// CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania #include "rasterizeKernels.h" #include "rasterizeTools.h" //#include "cuda_runtime.h" glm::vec3* framebuffer; fragment* depthbuffer; float* device_vbo; float* device_nbo; float* device_cbo; int* device_ibo; vertex* verticies; triangle* primitives; sphere* spheres; int* primitiveStageBuffer; uniforms* device_uniforms; int* binBuffers; int* bufferCounters; int* tileBuffers; int* tileBufferCounters; void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } //Handy dandy little hashing function that provides seeds for random number generation __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Writes a given fragment to a fragment buffer at a given location __host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; depthbuffer[index] = frag; } } //Reads a fragment from a given location in a fragment buffer __host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index]; }else{ fragment f; return f; } } __host__ __device__ int getDepthBufferIndex(int x, int y, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y && x>=0 && y >= 0) return (y*resolution.x) + x; return -1; } __host__ __device__ float getDepthFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index].depth; }else{ return 0; } } __device__ unsigned long long int fatomicMin(unsigned long long int * addr, unsigned long long int value) { unsigned long long ret = *addr; while(value < ret) { unsigned long long old = ret; if((ret = atomicCAS(addr, old, value)) == old) break; } return ret; } //Writes a given pixel to a pixel buffer at a given location __host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; framebuffer[index] = value; } } //Reads a pixel from a pixel buffer at a given location __host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return framebuffer[index]; }else{ return glm::vec3(0,0,0); } } //Kernel that clears a given pixel buffer with a given color __global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = color; } } //Kernel that clears a given fragment buffer depth only. 
Everything else is ignored because it will be overwritten later __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ buffer[index].depth= MAX_DEPTH; } } //Kernel that clears a given fragment buffer with a given fragment __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ fragment f = frag; f.position.x = x; f.position.y = y; buffer[index] = f; } } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } __global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, float* cbo, int cbosize, vertex* verticies, uniforms* u_variables, pipelineOpts opts){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/3){ vertex vOut; glm::vec4 vertexEyePos = glm::vec4(vbo[index*3+0],vbo[index*3+1],vbo[index*3+2],1.0); vertexEyePos = u_variables->viewTransform*u_variables->modelTransform*vertexEyePos; //Compute lighting vectors glm::vec4 eyeLightPos = u_variables->viewTransform*u_variables->lightPos; glm::vec4 eyeLightDir = (eyeLightPos - vertexEyePos); glm::vec4 halfVector = (eyeLightDir - vertexEyePos); //Normals are in eye space glm::vec4 vertexEyeNorm = glm::vec4(nbo[index*3+0],nbo[index*3+1],nbo[index*3+2],0.0); vertexEyeNorm = u_variables->viewTransform*u_variables->modelTransform*vertexEyeNorm; glm::vec3 vertexColor = glm::vec3(cbo[(index%3)*3+0],cbo[(index%3)*3+1],cbo[(index%3)*3+2]); //Apply perspective matrix and perspective division glm::vec4 pos = u_variables->perspectiveTransform*vertexEyePos; pos.x /= pos.w; pos.y /= pos.w; pos.z /= pos.w; //Emit vertex vOut.pos = glm::vec3(pos); vOut.eyeNormal = glm::normalize(glm::vec3(vertexEyeNorm)); vOut.eyeHalfVector = glm::normalize(glm::vec3(halfVector)); vOut.eyeLightDirection = glm::vec3(eyeLightDir); vOut.color = vertexColor; verticies[index] = vOut; } } //TODO: Implement primitive assembly __global__ void primitiveAssemblyKernel(vertex* verticies, int* ibo, int ibosize, triangle* primitives, int* primitiveStageBuffer, uniforms* u_variables, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int primitivesCount = ibosize/3; if(index<primitivesCount){ //3 floats per vert, 3 verts per triangle triangle primitive; //Load verticies int vertIndex = ibo[index*3+0]; primitive.v0 = verticies[vertIndex]; vertIndex = ibo[index*3+1]; primitive.v1 = verticies[vertIndex]; vertIndex = ibo[index*3+2]; primitive.v2 = verticies[vertIndex]; //Write back primitive primitives[index] = primitive; primitiveStageBuffer[index] = index;//Throw triangle into buffer } } 
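// ---------------------------------------------------------------------------
// Illustrative helper (not part of the original file): the facing test inside
// backfaceCulling() below is the z-component of the cross product of the two
// screen-space edges (v1 - v0) x (v2 - v0), i.e. twice the signed area of the
// projected triangle. The kernel culls triangles whose signed area is
// negative. Factored out here only to document the math; the kernel computes
// the same expression inline.
// ---------------------------------------------------------------------------
__host__ __device__ inline float signedScreenAreaTimesTwo(const triangle& tri)
{
	float ux = tri.v1.pos.x - tri.v0.pos.x;
	float uy = tri.v1.pos.y - tri.v0.pos.y;
	float vx = tri.v2.pos.x - tri.v0.pos.x;
	float vy = tri.v2.pos.y - tri.v0.pos.y;
	return ux * vy - uy * vx;
}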
__global__ void backfaceCulling(triangle* primitives, int* primitiveStageBuffer, int NPrimitives, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < NPrimitives) { int primIndex = primitiveStageBuffer[index]; if(primIndex >= 0 && primIndex < NPrimitives){ triangle tri = primitives[primIndex]; float ux = tri.v1.pos.x-tri.v0.pos.x; float uy = tri.v1.pos.y-tri.v0.pos.y; float vx = tri.v2.pos.x-tri.v0.pos.x; float vy = tri.v2.pos.y-tri.v0.pos.y; float facing = ux*vy-uy*vx; if(facing < 0.0) { //Backface. Cull it. primitiveStageBuffer[index] = -1; } } } } __global__ void totalClipping(triangle* primitives, int* primitiveStageBuffer, int NPrimitives, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < NPrimitives) { int primIndex = primitiveStageBuffer[index]; if(primIndex >= 0 && primIndex < NPrimitives){ triangle tri = primitives[primIndex]; glm::vec3 minpoint, maxpoint; getAABBForTriangle(tri, minpoint,maxpoint); if(!isAABBInClipSpace(minpoint, maxpoint)) { //Backface. Cull it. primitiveStageBuffer[index] = -1; } } } } //TODO: Do this a lot more efficiently and in parallel __global__ void rasterizationKernel(triangle* primitives, int* primitiveStageBuffer, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, uniforms* u_variables, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ int triIndex = primitiveStageBuffer[index]; if(triIndex >= 0){ //For each primitive //Load triangle localy transformTriToScreenSpace(primitives[triIndex], resolution); triangle tri = primitives[triIndex]; //AABB for triangle glm::vec3 minPoint; glm::vec3 maxPoint; getAABBForTriangle(tri, minPoint, maxPoint); //Compute pixel range //Do some per-fragment clipping and restrict to screen space int minX = glm::max(glm::floor(minPoint.x),0.0f); int maxX = glm::min(glm::ceil(maxPoint.x),resolution.x); int minY = glm::max(glm::floor(minPoint.y),0.0f); int maxY = glm::min(glm::ceil(maxPoint.y),resolution.y); fragment frag; frag.primitiveIndex = index; //TODO: Do something more efficient than this for(int x = minX; x <= maxX; ++x) { for(int y = minY; y <= maxY; ++y) { int dbindex = getDepthBufferIndex(x,y,resolution); if(dbindex < 0) continue; frag.position.x = x; frag.position.y = y; glm::vec3 bCoords = calculateBarycentricCoordinate(tri, glm::vec2(x,y)); if(isBarycentricCoordInBounds(bCoords)) { //Blend values. frag.depth = tri.v0.pos.z*bCoords.x+tri.v1.pos.z*bCoords.y+tri.v2.pos.z*bCoords.z; if(frag.depth > 0.0f && frag.depth < 1.0f) { //Only continue if pixel is in screen. 
frag.color = tri.v0.color*bCoords.x+tri.v1.color*bCoords.y+tri.v2.color*bCoords.z; frag.normal = glm::normalize(tri.v0.eyeNormal*bCoords.x+tri.v1.eyeNormal*bCoords.y+tri.v2.eyeNormal*bCoords.z); frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); fatomicMin(&(depthbuffer[dbindex].depthPrimTag),frag.depthPrimTag); if(frag.depthPrimTag == depthbuffer[dbindex].depthPrimTag)//If this is true, we won the race condition writeToDepthbuffer(x,y,frag, depthbuffer,resolution); } } } } } } } __host__ __device__ void depthFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { float depth = depthbuffer[index].depth; if(depth < 1.0f) depthbuffer[index].color = glm::vec3(1.0f-depth); } __host__ __device__ void ambientFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { //Do nothing. Interpolated color is assumed to be right } __host__ __device__ void blinnPhongFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { //TODO: Implement light color shading fragment frag = depthbuffer[index]; glm::vec3 baseColor = frag.color; frag.color *= u_variables->blinnPhongParams.x;//Ambient term always present float NdotL = glm::max(glm::dot(frag.normal,frag.lightDir),0.0f); if (NdotL > 0.0f) { glm::vec3 diffuseColor = u_variables->diffuseColor; if(opts.showTriangleColors) diffuseColor = baseColor; frag.color += u_variables->blinnPhongParams.y * u_variables->lightColor * diffuseColor * NdotL; float NdotHV = glm::max(glm::dot(frag.normal,frag.halfVector),0.0f); glm::vec3 specularColor = u_variables->specularColor; if(opts.showTriangleColors) specularColor = baseColor; frag.color += u_variables->blinnPhongParams.z * u_variables->lightColor * specularColor * glm::pow(NdotHV, u_variables->shininess); } depthbuffer[index] = frag; } __host__ __device__ void normalFSImpl(fragment* depthbuffer, int index, uniforms* u_variables, pipelineOpts opts) { glm::vec3 color = depthbuffer[index].normal; color.x = abs(color.x); color.y = abs(color.y); color.z = abs(color.z); depthbuffer[index].color = color; } __global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, uniforms* u_variables, pipelineOpts opts) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ if(depthbuffer[index].depth < MAX_DEPTH){ //normalFSImpl(depthbuffer, index, u_variables, opts); switch(opts.fShaderProgram) { case DEPTH_SHADING: depthFSImpl(depthbuffer, index, u_variables, opts); break; case AMBIENT_LIGHTING: ambientFSImpl(depthbuffer, index, u_variables, opts); break; case NORMAL_SHADING: normalFSImpl(depthbuffer, index, u_variables, opts); break; case BLINN_PHONG_SHADING: blinnPhongFSImpl(depthbuffer, index, u_variables, opts); break; } } } } //Writes fragment colors to the framebuffer __global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ if(depthbuffer[index].depth < MAX_DEPTH){//Only framebuffer[index] = depthbuffer[index].color; } } } __global__ void binRasterizationKernel(triangle* primitives, int* 
primitiveStageBuffer, int NPrimitives, int* bufferCounters, int* binBuffers, int binBufferSize, glm::vec2 resolution, glm::vec2 binDims, pipelineOpts opts) { extern __shared__ int s[]; int *sBufferCounters = s; int numBins = binDims.x*binDims.y; int *sBatchNum = &s[numBins]; //threadIdx.x is id within batch int indexInBatch = threadIdx.x; int numBatchesPerBlock = blockDim.x; int binWidth = ceil(resolution.x/binDims.x); int binHeight = ceil(resolution.y/binDims.y); int indexInBlock = threadIdx.x+threadIdx.y*blockDim.x; //Initialize counters if(indexInBlock < numBins) sBufferCounters[indexInBlock] = 0; if(indexInBlock < blockDim.x) sBatchNum[indexInBlock] = 0; __syncthreads(); while(sBatchNum[indexInBatch] < numBatchesPerBlock) { //Get a batch int batchId = atomicAdd(&sBatchNum[indexInBatch], 1); if(batchId < numBatchesPerBlock){ int stageBufferIndex = indexInBatch + blockDim.x*(batchId*gridDim.x+blockIdx.x); if(stageBufferIndex < NPrimitives) { int triangleIndex = primitiveStageBuffer[stageBufferIndex]; if(triangleIndex >= 0 && triangleIndex < NPrimitives){ glm::vec3 minpoint,maxpoint; transformTriToScreenSpace(primitives[triangleIndex], resolution); getAABBForTriangle(primitives[triangleIndex], minpoint, maxpoint); for(int x = 0; x < binDims.x; ++x) { for(int y = 0; y < binDims.y; ++y) { if(isAABBInBin(minpoint, maxpoint, x*binWidth, (x+1)*binWidth, y*binHeight, (y+1)*binHeight)) { int binIndex = x+y*binDims.x; int bufLoc = atomicAdd(&sBufferCounters[binIndex], 1); if(bufLoc < binBufferSize){ int binBufferIndex = bufLoc + binIndex*binBufferSize + blockIdx.x*(numBins*binBufferSize); binBuffers[binBufferIndex] = triangleIndex; }else{ //ERROR Overflow } } } } } } } } __syncthreads(); if(indexInBlock < numBins) bufferCounters[numBins*blockIdx.x + indexInBlock] = sBufferCounters[indexInBlock]; } __global__ void coarseRasterizationKernel(triangle* primitives, int NPrimitives, glm::vec2 resolution, int* binBufferCounters, int* binBuffers, int binBufferSize, int* tileBuffers, int tileBufferSize, int* tileBufferCounters, int numTilesX, int numTilesY, int tileSize, int numBinBlocks) { extern __shared__ int s[]; int* sTriIndexBuffer = s; int numTilesInBin = blockDim.x*blockDim.y; glm::vec4* sAABBBuffer = (glm::vec4*) &sTriIndexBuffer[numTilesInBin]; int binIndex = blockIdx.x + blockIdx.y*gridDim.x; int tileXIndex = blockIdx.x*blockDim.x+threadIdx.x;//Bin.X*tilesPerBin.x+tileXInBinIndex int tileYIndex = blockIdx.y*blockDim.y+threadIdx.y;//Bin.Y*tilesPerBin.y+tileYInBinIndex int numBins = gridDim.x*gridDim.y; int indexInBin = threadIdx.x+threadIdx.y*blockDim.x; int tileBufferCounter = 0; int tileBufferOffset = tileBufferSize*(tileXIndex+tileYIndex*numTilesX); glm::vec4 tileAABB = glm::vec4(tileXIndex*tileSize, tileYIndex*tileSize, (tileXIndex+1)*tileSize, (tileYIndex+1)*tileSize); //Prefetch triangles and calculate bounding boxes in compact fashion //Iterate by binRaster Block for(int binBlock = 0; binBlock < numBinBlocks; ++binBlock) { int bufferOffset = 0; int bufferSize = binBufferCounters[binIndex+binBlock*numBins]; do{ if(indexInBin < bufferSize) { int binBufferOffset = binIndex*binBufferSize + binBlock*(numBins*binBufferSize); int triIndex = binBuffers[(bufferOffset + indexInBin) + binBufferOffset]; sTriIndexBuffer[indexInBin] = triIndex; glm::vec4 mXmYMXMY(0,0,0,0); if(triIndex >= 0 && triIndex < NPrimitives) getCompact2DAABBForTriangle(primitives[triIndex], mXmYMXMY); sAABBBuffer[indexInBin] = mXmYMXMY; }else{ sTriIndexBuffer[indexInBin] = -1; } //TODO: Do this more safely in shared memory. 
bufferOffset = bufferOffset + numTilesInBin; bufferSize = bufferSize - numTilesInBin; __syncthreads();//Prefetch complete, wait for everyone else to catch up //For each triangle, put in correct tile for(int tri = 0; tri < numTilesInBin; ++tri) { if(sTriIndexBuffer[tri] >= 0){ if(doCompactAABBsintersect(tileAABB, sAABBBuffer[tri])) { if(tileBufferCounter < tileBufferSize){ tileBuffers[tileBufferOffset + tileBufferCounter] = sTriIndexBuffer[tri]; tileBufferCounter++; }else{ //ERROR Overflow } } } } __syncthreads(); }while(bufferSize > 0); } //Write out tile counts tileBufferCounters[tileXIndex+tileYIndex*numTilesX] = tileBufferCounter; } //TODO: Do this a lot more efficiently and in parallel __global__ void fineRasterizationKernel(triangle* primitives, int NPrimitives, glm::vec2 resolution, int* tileBuffers, int tileBufferSize, int* tileBufferCounters, fragment* depthbuffer) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int tileIndex = blockIdx.x+blockIdx.y*gridDim.x; int pixelX = blockIdx.x*blockDim.x+threadIdx.x;//TileX*tilesize+threadIdx.x int pixelY = blockIdx.y*blockDim.y+threadIdx.y;//TileY*tilesize+threadIdx.y if(pixelX >= 0 && pixelX < resolution.x && pixelY >= 0 && pixelY < resolution.y) { //Pixel is on screen. //Each thread has exclusive access to this location in the depth buffer so no complex atomics needed. int dbIndex = pixelX+pixelY*resolution.x;//Depth buffer location //tileXIndex+tileYIndex*numTilesX int tileIndex = blockIdx.x+blockIdx.y*gridDim.x; int triCount = tileBufferCounters[tileIndex]; //For each triangle in queue for(int t = 0; t < triCount; ++t) { //(tileIndex)*tileBufferSize+tri; int triIndex = tileBuffers[tileIndex*tileBufferSize+t]; if(triIndex >= 0 && triIndex < NPrimitives){ triangle tri = primitives[triIndex]; //Already in screen space (from bin rasterizer) glm::vec3 minPoint; glm::vec3 maxPoint; getAABBForTriangle(tri, minPoint, maxPoint); if(isPixelInAABB(pixelX, pixelY, minPoint, maxPoint)) { fragment frag; frag.primitiveIndex = index; frag.position.x = pixelX; frag.position.y = pixelY; glm::vec3 bCoords = calculateBarycentricCoordinate(tri, glm::vec2(pixelX,pixelY)); if(isBarycentricCoordInBounds(bCoords)) { frag.depth = tri.v0.pos.z*bCoords.x+tri.v1.pos.z*bCoords.y+tri.v2.pos.z*bCoords.z; frag.primitiveIndex = triIndex; if(frag.depth > 0.0f && frag.depth < 1.0f) { //Depth test if(frag.depth < depthbuffer[dbIndex].depth) { //Only continue if depth test passes. 
frag.color = tri.v0.color*bCoords.x+tri.v1.color*bCoords.y+tri.v2.color*bCoords.z; frag.normal = glm::normalize(tri.v0.eyeNormal*bCoords.x+tri.v1.eyeNormal*bCoords.y+tri.v2.eyeNormal*bCoords.z); frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); writeToDepthbuffer(pixelX,pixelY, frag, depthbuffer,resolution); } } } } } } } } void binRasterizer(int NPrimitives, glm::vec2 resolution, pipelineOpts opts) { glm::vec2 binDims = glm::vec2(5,5); //Tuning params int binBufferSize = 2<<6; int batchSize = 32;//One batch per warp int numBatches = ceil(NPrimitives/float(batchSize)); int numBlocks = max(ceil(numBatches*batchSize/float(1024)), ceil(numBatches*batchSize*26/float(8192)));//26 is number of registers for kernel int batchesPerBlock = ceil(numBatches/float(numBlocks)); //Allocate bin buffers cudaMalloc((void**) &binBuffers, numBlocks*binDims.x*binDims.y*(binBufferSize)*sizeof(int)); cudaMalloc((void**) &bufferCounters, numBlocks*binDims.x*binDims.y*sizeof(int)); dim3 blockDims(batchSize, batchesPerBlock); dim3 gridDims(numBlocks); int Ns = (binDims.x*binDims.y+batchSize)*sizeof(int); binRasterizationKernel<<<gridDims,blockDims,Ns>>>( primitives, primitiveStageBuffer, NPrimitives, bufferCounters, binBuffers, binBufferSize, resolution, binDims, opts); //==============COARSE RASTER=================== int tilesize = 8;//8x8 int tileBufferSize = 2<<6; int numTilesX = ceil(resolution.x/float(tilesize)); int numTilesY = ceil(resolution.y/float(tilesize)); int numTilesPerBinX = ceil(numTilesX/float(binDims.x)); int numTilesPerBinY = ceil(numTilesY/float(binDims.y)); cudaMalloc((void**) &tileBuffers, numTilesX*numTilesY*tileBufferSize*sizeof(int)); cudaMalloc((void**) &tileBufferCounters, numTilesX*numTilesY*sizeof(int)); dim3 coarseGridDims(binDims.x, binDims.y); dim3 coarseBlockDims(numTilesPerBinX,numTilesPerBinY); Ns = (sizeof(glm::vec4)+sizeof(int))*numTilesPerBinX*numTilesPerBinY; coarseRasterizationKernel<<<coarseGridDims,coarseBlockDims,Ns>>>(primitives, NPrimitives, resolution, bufferCounters, binBuffers, binBufferSize, tileBuffers, tileBufferSize, tileBufferCounters, numTilesX, numTilesY, tilesize, numBlocks); cudaFree(binBuffers);//Free previous buffer //int* debug = (int*)malloc(numTilesX*numTilesY*sizeof(int)); //cudaMemcpy( debug, tileBufferCounters, numTilesX*numTilesY*sizeof(int), cudaMemcpyDeviceToHost); //free(debug); //==============FINE RASTER===================== dim3 tileDims(tilesize,tilesize); dim3 numTiles(numTilesX,numTilesY); Ns = 0; fineRasterizationKernel<<<numTiles,tileDims,Ns>>>(primitives, NPrimitives, resolution, tileBuffers, tileBufferSize, tileBufferCounters, depthbuffer); cudaFree(tileBufferCounters); cudaFree(tileBuffers); cudaFree(bufferCounters); } __global__ void countPixelsPerTri(triangle* primitives, int* primitiveStageBuffer, int primitivesCount, glm::vec2 resolution, int* pixelsPerTri) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ pixelsPerTri[index] = -1; int triIndex = primitiveStageBuffer[index]; if(triIndex >= 0){ //For each primitive //Load triangle localy triangle tri = primitives[triIndex]; int pixelsInside=0; //AABB for triangle glm::vec3 minPoint; glm::vec3 maxPoint; //transformTriToScreenSpace(tri, resolution); // Already done getAABBForTriangle(tri, minPoint, maxPoint); //Compute pixel range //Do some 
per-fragment clipping and restrict to screen space int minX = glm::max(glm::floor(minPoint.x),0.0f); int maxX = glm::min(glm::ceil(maxPoint.x),resolution.x); int minY = glm::max(glm::floor(minPoint.y),0.0f); int maxY = glm::min(glm::ceil(maxPoint.y),resolution.y); for(int x = minX; x <= maxX; ++x) { for(int y = minY; y <= maxY; ++y) { int dbindex = getDepthBufferIndex(x,y,resolution); if(dbindex < 0) continue; glm::vec3 bCoords = calculateBarycentricCoordinate(tri, glm::vec2(x,y)); if(isBarycentricCoordInBounds(bCoords)) { pixelsInside++; } } } pixelsPerTri[triIndex] = pixelsInside; } } } int* pixelsPerTri; void calculateMetrics(PerformanceMetrics &metrics, triangle* primitives, int* primitiveStageBuffer, int NPrimitives, glm::vec2 resolution, dim3 primitiveBlocks, dim3 tileSize, pipelineOpts opts) { pixelsPerTri = NULL; cudaMalloc((void**)&pixelsPerTri, NPrimitives*sizeof(int)); countPixelsPerTri<<<primitiveBlocks, tileSize>>>(primitives, primitiveStageBuffer, NPrimitives, resolution, pixelsPerTri); int* pixelCounts = new int[NPrimitives]; cudaMemcpy( pixelCounts, pixelsPerTri, NPrimitives*sizeof(int), cudaMemcpyDeviceToHost); //TODO Assemble stats in CPU for simplicity. float avgPixelsPerTri = 0.0f; int maxPixelsPerTri = 0; int NPrimitivesRendered = NPrimitives; for(int i = 0; i < NPrimitives; i++) { maxPixelsPerTri = max(maxPixelsPerTri, pixelCounts[i]); if(pixelCounts[i] < 0) { NPrimitivesRendered--; }else{ avgPixelsPerTri += pixelCounts[i]; } } metrics.NumTriangles = NPrimitives; if(NPrimitives != 0){ metrics.avgPixelsPerTri = avgPixelsPerTri/NPrimitivesRendered; metrics.maxPixelsPerTri = maxPixelsPerTri;//Record worst case } metrics.NumTrianglesRastered = NPrimitivesRendered; delete pixelCounts; cudaFree(pixelsPerTri); } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* nbo, int nbosize, float* cbo, int cbosize, int* ibo, int ibosize, uniforms u_variables, pipelineOpts opts, PerformanceMetrics &metrics) { // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize))); //set up framebuffer framebuffer = NULL; cudaMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3)); //set up depthbuffer depthbuffer = NULL; cudaMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment)); //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states clearImage<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, framebuffer, glm::vec3(0,0,0)); fragment frag; frag.color = glm::vec3(0.0f); frag.normal = glm::vec3(0.0f); frag.position = glm::vec2(0.0f,0.0f); frag.depth = MAX_DEPTH; clearDepthBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer); //------------------------------ //memory stuff //------------------------------ primitives = NULL; cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle)); primitiveStageBuffer = NULL; cudaMalloc((void**)&primitiveStageBuffer, (ibosize/3)*sizeof(int)); verticies = NULL; cudaMalloc((void**)&verticies, (vbosize)*sizeof(vertex)); device_uniforms = NULL; cudaMalloc((void**)&device_uniforms, sizeof(uniforms)); cudaMemcpy( device_uniforms, &u_variables, sizeof(uniforms), cudaMemcpyHostToDevice); device_ibo = NULL; cudaMalloc((void**)&device_ibo, 
ibosize*sizeof(int)); cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice); device_vbo = NULL; cudaMalloc((void**)&device_vbo, vbosize*sizeof(float)); cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice); device_nbo = NULL; cudaMalloc((void**)&device_nbo, nbosize*sizeof(float)); cudaMemcpy( device_nbo, nbo, nbosize*sizeof(float), cudaMemcpyHostToDevice); device_cbo = NULL; cudaMalloc((void**)&device_cbo, cbosize*sizeof(float)); cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice); tileSize = 32; int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize)); //------------------------------ //vertex shader //------------------------------ vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_nbo, nbosize, device_cbo, cbosize, verticies, device_uniforms, opts); checkCUDAError("Kernel failed VS!"); cudaDeviceSynchronize(); //------------------------------ //primitive assembly //------------------------------ primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize)); primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(verticies, device_ibo, ibosize, primitives, primitiveStageBuffer, device_uniforms, opts); cudaDeviceSynchronize(); checkCUDAError("Kernel failed PA!"); int NPrimitives = ibosize/3; if(opts.backfaceCulling) { backfaceCulling<<<primitiveBlocks, tileSize>>>(primitives, primitiveStageBuffer, NPrimitives, opts); } if(opts.totalClipping) { totalClipping<<<primitiveBlocks, tileSize>>>(primitives, primitiveStageBuffer, NPrimitives, opts); } //------------------------------ //rasterization //------------------------------ LARGE_INTEGER beginTime; QueryPerformanceCounter( &beginTime ); // Code to measure ... if(opts.rasterMode == NAIVE) { rasterizationKernel<<<primitiveBlocks, tileSize>>>(primitives, primitiveStageBuffer, NPrimitives, depthbuffer, resolution, device_uniforms, opts); }else if(opts.rasterMode == BIN){ binRasterizer(NPrimitives, resolution, opts); } cudaDeviceSynchronize(); checkCUDAError("Kernel failed Raster!"); LARGE_INTEGER endTime; QueryPerformanceCounter( &endTime ); LARGE_INTEGER timerFreq; QueryPerformanceFrequency( &timerFreq ); const double freq = 1.0f / timerFreq.QuadPart; const double timeSeconds = ( endTime.QuadPart - beginTime.QuadPart )* freq;; metrics.rasterTimeSeconds = timeSeconds; if(opts.recordMetrics){ //Calculate metrics //TODO: Add more metrics calculateMetrics(metrics, primitives, primitiveStageBuffer, NPrimitives, resolution, primitiveBlocks, tileSize, opts); } //------------------------------ //fragment shader //------------------------------ fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, device_uniforms, opts); cudaDeviceSynchronize(); checkCUDAError("Kernel failed FS!"); //------------------------------ //write fragments to framebuffer //------------------------------ render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer); sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer); cudaDeviceSynchronize(); kernelCleanup(); checkCUDAError("Kernel failed!"); } //close to vertex __global__ void sphereCenterShadeKernel(float* vbo, int vbosize, float* cbo, int cbosize, sphere* sp, uniforms* u_variables, pipelineOpts opts){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/4){ sphere vOut; glm::vec4 vertexEyePos = glm::vec4(vbo[index*4+0],vbo[index*4+1],vbo[index*4+2],1.0); vertexEyePos = 
u_variables->viewTransform*u_variables->modelTransform*vertexEyePos; //Compute lighting vectors /*glm::vec4 eyeLightPos = u_variables->viewTransform*u_variables->lightPos; glm::vec4 eyeLightDir = (eyeLightPos - vertexEyePos); glm::vec4 halfVector = (eyeLightDir - vertexEyePos);*/ //Normals are in eye space /* glm::vec4 vertexEyeNorm = glm::vec4(nbo[index*3+0],nbo[index*3+1],nbo[index*3+2],0.0); vertexEyeNorm = u_variables->viewTransform*u_variables->modelTransform*vertexEyeNorm; glm::vec3 vertexColor = glm::vec3(cbo[(index%3)*3+0],cbo[(index%3)*3+1],cbo[(index%3)*3+2]);*/ //Apply perspective matrix and perspective division glm::vec4 pos = u_variables->perspectiveTransform*vertexEyePos; pos.x /= pos.w; pos.y /= pos.w; pos.z /= pos.w; vOut.r = vbo[index*4+3] / pos.w; //Emit vertex vOut.center = glm::vec3(pos); //vOut.eyeLightDirection = glm::vec3(eyeLightDir); vOut.color = glm::vec3(cbo[3*index], cbo[3*index + 1], cbo[3*index + 2]); sp[index] = vOut; } } //TODO: Do this a lot more efficiently and in parallel __global__ void rasterizationKernelSphere(sphere* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, uniforms* u_variables, pipelineOpts opts) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ if(index >= 0){ //For each primitive //Load triangle localy int rx; int ry; transformSphereToScreenSpace(primitives[index], resolution, rx, ry); sphere sp = primitives[index]; //AABB for triangle glm::vec3 minPoint; glm::vec3 maxPoint; getAABBForSphere(sp, minPoint, maxPoint, rx, ry); //Compute pixel range //Do some per-fragment clipping and restrict to screen space int minX = max(minPoint.x,0.0f); int maxX = min(maxPoint.x,resolution.x); int minY = max(minPoint.y,0.0f); int maxY = min(maxPoint.y,resolution.y); //minX = 600, maxX = 800; //minY = 155, maxY = 214; fragment frag; frag.primitiveIndex = index; //TODO: Do something more efficient than this float rsq = sp.r * sp.r; for(int x = minX; x <= maxX; ++x) { for(int y = minY; y <= maxY; ++y) { int dbindex = getDepthBufferIndex(x,y,resolution); if(dbindex < 0) continue; frag.position.x = x; frag.position.y = y; glm::vec2 dis2 = glm::vec2(x,y) - glm::vec2(sp.center.x, sp.center.y); float dis2s = dis2.x * dis2.x + dis2.y * dis2.y; if(dis2s < rx * ry) { //Blend values. frag.depth = sp.center.z;// - glm::sqrt(rsq - dis * dis); if(frag.depth > 0.0f && frag.depth < 1.0f) { //Only continue if pixel is in screen. //TODO frag.normal = glm::normalize(glm::vec3(x, y, frag.depth) - sp.center); frag.color = glm::vec3(1,0,0); //frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); //frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); fatomicMin(&(depthbuffer[dbindex].depthPrimTag),frag.depthPrimTag); if(frag.depthPrimTag == depthbuffer[dbindex].depthPrimTag)//If this is true, we won the race condition writeToDepthbuffer(x,y,frag, depthbuffer,resolution); } } //int dbindex = getDepthBufferIndex(x,y,resolution); //frag.depth = sp.center.z;// - glm::sqrt(rsq - dis * dis); // if(frag.depth > 0.0f && frag.depth < 1.0f) // { // //Only continue if pixel is in screen. 
// //TODO // frag.color = glm::vec3(5,0,0); // frag.normal = glm::normalize(glm::vec3(x, y, frag.depth) - sp.center); // //frag.lightDir = glm::normalize(tri.v0.eyeLightDirection*bCoords.x+tri.v1.eyeLightDirection*bCoords.y+tri.v2.eyeLightDirection*bCoords.z); // //frag.halfVector = glm::normalize(tri.v0.eyeHalfVector*bCoords.x+tri.v1.eyeHalfVector*bCoords.y+tri.v2.eyeHalfVector*bCoords.z); // fatomicMin(&(depthbuffer[dbindex].depthPrimTag),frag.depthPrimTag); // if(frag.depthPrimTag == depthbuffer[dbindex].depthPrimTag)//If this is true, we won the race condition // writeToDepthbuffer(x,y,frag, depthbuffer,resolution); // } } } } } } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaSphereRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, uniforms u_variables, pipelineOpts opts, PerformanceMetrics &metrics) { // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize))); //set up framebuffer framebuffer = NULL; cudaMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3)); //set up depthbuffer depthbuffer = NULL; cudaMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment)); //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states clearImage<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, framebuffer, glm::vec3(0,0,0)); fragment frag; frag.color = glm::vec3(0.0f); frag.normal = glm::vec3(0.0f); frag.position = glm::vec2(0.0f,0.0f); frag.depth = MAX_DEPTH; clearDepthBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer); //------------------------------ //memory stuff //------------------------------ //primitives = NULL; //cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle)); //primitiveStageBuffer = NULL; //cudaMalloc((void**)&primitiveStageBuffer, (ibosize/3)*sizeof(int)); spheres = NULL; cudaMalloc((void**)&spheres, (vbosize)*sizeof(sphere)); device_uniforms = NULL; cudaMalloc((void**)&device_uniforms, sizeof(uniforms)); cudaMemcpy( device_uniforms, &u_variables, sizeof(uniforms), cudaMemcpyHostToDevice); /*device_ibo = NULL; cudaMalloc((void**)&device_ibo, ibosize*sizeof(int)); cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice);*/ device_vbo = NULL; cudaMalloc((void**)&device_vbo, vbosize*sizeof(float)); cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice); /*device_nbo = NULL; cudaMalloc((void**)&device_nbo, nbosize*sizeof(float)); cudaMemcpy( device_nbo, nbo, nbosize*sizeof(float), cudaMemcpyHostToDevice);*/ device_cbo = NULL; cudaMalloc((void**)&device_cbo, cbosize*sizeof(float)); cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice); tileSize = 32; int primitiveBlocks = ceil(((float)vbosize/4)/((float)tileSize)); //------------------------------ //vertex shader //------------------------------ sphereCenterShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_cbo, cbosize, spheres, device_uniforms, opts); checkCUDAError("Kernel failed VS!"); cudaDeviceSynchronize(); //------------------------------ //primitive assembly //------------------------------ //seems no need for sphere //primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize)); //primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(verticies, 
device_ibo, ibosize, primitives, primitiveStageBuffer, device_uniforms, opts); //cudaDeviceSynchronize(); //checkCUDAError("Kernel failed PA!"); //no need for backface culling /*int NPrimitives = ibosize/3; if(opts.backfaceCulling) { backfaceCulling<<<primitiveBlocks, tileSize>>>(primitives, primitiveStageBuffer, NPrimitives, opts); } if(opts.totalClipping) { totalClipping<<<primitiveBlocks, tileSize>>>(primitives, primitiveStageBuffer, NPrimitives, opts); }*/ //------------------------------ //rasterization //------------------------------ LARGE_INTEGER beginTime; QueryPerformanceCounter( &beginTime ); // Code to measure ... if(opts.rasterMode == NAIVE) { rasterizationKernelSphere<<<primitiveBlocks, tileSize>>>(spheres, vbosize / 4, depthbuffer, resolution, device_uniforms, opts); }else if(opts.rasterMode == BIN){ //binRasterizer(NPrimitives, resolution, opts); } cudaDeviceSynchronize(); checkCUDAError("Kernel failed Raster!"); LARGE_INTEGER endTime; QueryPerformanceCounter( &endTime ); LARGE_INTEGER timerFreq; QueryPerformanceFrequency( &timerFreq ); const double freq = 1.0f / timerFreq.QuadPart; const double timeSeconds = ( endTime.QuadPart - beginTime.QuadPart )* freq;; metrics.rasterTimeSeconds = timeSeconds; if(opts.recordMetrics){ //Calculate metrics //TODO: Add more metrics //calculateMetrics(metrics, primitives, primitiveStageBuffer, vbosize, resolution, primitiveBlocks, tileSize, opts); } //------------------------------ //fragment shader //------------------------------ fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, device_uniforms, opts); cudaDeviceSynchronize(); checkCUDAError("Kernel failed FS!"); //------------------------------ //write fragments to framebuffer //------------------------------ render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer); sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer); cudaDeviceSynchronize(); kernelCleanup(); checkCUDAError("Kernel failed!"); } void kernelCleanup(){ cudaFree( primitives ); cudaFree( device_vbo ); cudaFree( device_nbo); cudaFree( device_cbo ); cudaFree( device_ibo ); cudaFree( framebuffer ); cudaFree( depthbuffer ); cudaFree( verticies ); cudaFree( device_uniforms); cudaFree(primitiveStageBuffer); }
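The rasterization stage above is timed with the Win32 QueryPerformanceCounter/QueryPerformanceFrequency pair, which ties the pipeline to Windows. Below is a minimal, self-contained sketch of taking the same measurement with CUDA events; the kernel and launch configuration are placeholders, not the rasterizer's real ones.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummyKernel() { }   // stands in for rasterizationKernel

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);                 // enqueue the start marker
    dummyKernel<<<64, 256>>>();                // the work being measured
    cudaEventRecord(stop, 0);                  // enqueue the stop marker
    cudaEventSynchronize(stop);                // wait until the stop marker completes

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);    // elapsed GPU time in milliseconds
    printf("raster time: %f seconds\n", ms / 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}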
7c2a7a5ee192fb4ede84fe1aca6e3330ecf68d34.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
7c2a7a5ee192fb4ede84fe1aca6e3330ecf68d34.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
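The pair of generated files above follows the usual explicit-instantiation layout: a header declares a templated wrapper, a .cuinl file holds its definition, and each generated translation unit instantiates exactly one convolution configuration so the configurations compile independently. A small sketch of that layout with hypothetical names (scale, kernels.h, scale_float.cu are ours, not the real megdnn/cutlass symbols):

// kernels.h -- the wrapper is only declared here
template <typename T> void scale(T* data, int n, T factor);

// kernels_impl.inl -- the definition, analogous to the .cuinl included by the generated files
#include "kernels.h"
template <typename T>
__global__ void scale_kernel(T* data, int n, T factor)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}
template <typename T>
void scale(T* data, int n, T factor)
{
    scale_kernel<T><<<(n + 255) / 256, 256>>>(data, n, factor);
}

// scale_float.cu -- one file per configuration, holding a single explicit instantiation
#include "kernels_impl.inl"
template void scale<float>(float*, int, float);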
c16840d69ecc027bfb2e3e9a40c62e2eff641000.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> void initialInt(int *ip, int size){ for(int i = 0; i<size; i++){ ip[i] = i; } } void printMatrix(int *C, const int nx, const int ny){ int *ic = C; printf("\n Matrix: (%d, %d) \n", nx, ny); for (int iy = 0; iy < ny; iy++){ for(int ix = 0; ix < nx; ix++){ printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } __global__ void printfThreadIndex(int *A, const int nx, const int ny){ int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy*nx + ix; printf("thread_id (%d,%d) block_id (%d, %d) coordinate (%d, %d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char **argv){ printf("%s Starting...\n", argv[0]); //get device information int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Using Device %d:%s\n", dev, deviceProp.name); hipSetDevice(dev); //set matrix dimension int nx = 8; int ny = 6; int nxy = nx*ny; int nBytes = nxy * sizeof(int); //malloc host memory int *h_A; h_A = (int *)malloc(nBytes); //initialize host matrix with integer initialInt(h_A, nxy); printMatrix(h_A, nx, ny); //malloc device memory int *d_MatA; hipMalloc((void **)&d_MatA, nBytes); //transfer data from host to device hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice); //setup execution configuration dim3 block(4, 2); dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y); //invoke the kernel hipLaunchKernelGGL(( printfThreadIndex), dim3(grid), dim3(block) , 0, 0, d_MatA, nx, ny); hipDeviceSynchronize(); // free host and device memory hipFree(d_MatA); free(h_A); //reset device hipDeviceReset(); return 0; }
c16840d69ecc027bfb2e3e9a40c62e2eff641000.cu
#include <cuda_runtime.h> #include <stdio.h> void initialInt(int *ip, int size){ for(int i = 0; i<size; i++){ ip[i] = i; } } void printMatrix(int *C, const int nx, const int ny){ int *ic = C; printf("\n Matrix: (%d, %d) \n", nx, ny); for (int iy = 0; iy < ny; iy++){ for(int ix = 0; ix < nx; ix++){ printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } __global__ void printfThreadIndex(int *A, const int nx, const int ny){ int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy*nx + ix; printf("thread_id (%d,%d) block_id (%d, %d) coordinate (%d, %d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char **argv){ printf("%s Starting...\n", argv[0]); //get device information int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Using Device %d:%s\n", dev, deviceProp.name); cudaSetDevice(dev); //set matrix dimension int nx = 8; int ny = 6; int nxy = nx*ny; int nBytes = nxy * sizeof(int); //malloc host memory int *h_A; h_A = (int *)malloc(nBytes); //initialize host matrix with integer initialInt(h_A, nxy); printMatrix(h_A, nx, ny); //malloc device memory int *d_MatA; cudaMalloc((void **)&d_MatA, nBytes); //transfer data from host to device cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice); //setup execution configuration dim3 block(4, 2); dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y); //invoke the kernel printfThreadIndex<<< grid, block >>>(d_MatA, nx, ny); cudaDeviceSynchronize(); // free host and device memory cudaFree(d_MatA); free(h_A); //reset device cudaDeviceReset(); return 0; }
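The printfThreadIndex example above calls the CUDA runtime without checking any return codes. A minimal sketch of the usual error-check macro that could wrap those calls; the macro name CHECK and the buffer size are ours, and the kernel launch itself is elided.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK(call)                                                         \
    do {                                                                    \
        cudaError_t err__ = (call);                                         \
        if (err__ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                   \
                    cudaGetErrorString(err__), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

int main()
{
    int* d_MatA = NULL;
    CHECK(cudaMalloc((void**)&d_MatA, 8 * 6 * sizeof(int)));
    // ... copy data and launch printfThreadIndex as in the example above ...
    CHECK(cudaGetLastError());        // reports invalid launch configurations
    CHECK(cudaDeviceSynchronize());   // reports errors raised while the kernel ran
    CHECK(cudaFree(d_MatA));
    CHECK(cudaDeviceReset());
    return 0;
}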
d55d51c97db68b6dfea656548885b00510aee346.hip
// !!! This is a file automatically generated by hipify!!! #include <limits> #include "custom_cuda_layers.h" #include <hip/hip_runtime_api.h> #include <cstdio> #include <cstdlib> #include <ctime> #define ATTN_THREADS 1024 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 void CheckCudaErrorAux(const char* file, unsigned line) { hipError_t err = hipGetLastError(); if (err == hipSuccess) return; std::cerr << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; throw std::runtime_error("CUDA ERROR!!!\n"); } #define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, float scale, int iterations, int reduceWidth) { #if __CUDA_ARCH__ >= 700 cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; __half2 h_scale = __float2half2_rn(scale); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? __half2float(vals[data_id + 1]) : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? __half2float(vals[data_id + 2]) : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? __half2float(vals[data_id + 3]) : minus_infinity; if (mask && recompute) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) ? __half2float(vals[data_id + 1]) : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) ? 
__half2float(vals[data_id + 2]) : minus_infinity; high_data[i].y = minus_infinity; if (mask && recompute) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); } } // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); } else { low_data[i].x = minus_infinity; low_data[i].y = minus_infinity; high_data[i].x = minus_infinity; high_data[i].y = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { low_data[i].x = __expf(low_data[i].x - max_val); low_data[i].y = __expf(low_data[i].y - max_val); high_data[i].x = __expf(high_data[i].x - max_val); high_data[i].y = __expf(high_data[i].y - max_val); sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = low_data[i].x / sum; vals[data_id + 1] = low_data[i].y / sum; vals[data_id + 2] = high_data[i].x / sum; vals[data_id + 3] = high_data[i].y / sum; } else { vals[data_id] = low_data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; } } } } #endif } __global__ void attn_softmax_v2(float* vals, float* attn_mask, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, float scale, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float4 data[MAX_REG_SIZE]; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 
0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? vals[data_id + 1] : minus_infinity; data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? vals[data_id + 2] : minus_infinity; data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? vals[data_id + 3] : minus_infinity; if (attn_mask && recompute) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; data[i].w += attn_mask[data_id + mask_offset + 3]; } } else { data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && (data_id + 1) > window_stride && (data_id + 1) < sequence_length) ? (vals[data_id + 1]) : minus_infinity; data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && (data_id + 2) > window_stride && (data_id + 2) < sequence_length) ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; if (attn_mask && recompute) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; if ((data_id + 2) < sequence_length) data[i].z += attn_mask[data_id + mask_offset + 2]; } } max_val = (data[i].x > max_val ? data[i].x : max_val); max_val = (data[i].y > max_val ? data[i].y : max_val); max_val = (data[i].z > max_val ? data[i].z : max_val); max_val = (data[i].w > max_val ? data[i].w : max_val); } else { data[i].x = minus_infinity; data[i].y = minus_infinity; data[i].z = minus_infinity; data[i].w = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { data[i].x = __expf(data[i].x - max_val); data[i].y = __expf(data[i].y - max_val); data[i].z = __expf(data[i].z - max_val); data[i].w = __expf(data[i].w - max_val); sum += (data[i].x + data[i].y + data[i].z + data[i].w); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = data[i].x / sum; vals[data_id + 1] = data[i].y / sum; vals[data_id + 2] = data[i].z / sum; vals[data_id + 3] = data[i].w / sum; } else { vals[data_id] = data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; } } } } } template <typename T> void launch_attn_softmax_v2(T* vals, T* mask, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, float scale, hipStream_t stream) { int total_count = batch_size * heads * num_seq; dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); dim3 block_dim(ATTN_THREADS); const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if (sequence_length <= 32768) hipLaunchKernelGGL(( attn_softmax_v2), dim3(grid_dim), dim3(block_dim), 0, stream, vals, mask, triangular, recompute, local_attention, window_size, total_count, (triangular ? (heads * batch_size) : heads), sequence_length, num_seq, scale, iterations, reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, float scale, hipStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, float scale, hipStream_t stream);
d55d51c97db68b6dfea656548885b00510aee346.cu
#include <limits> #include "custom_cuda_layers.h" #include <cuda_profiler_api.h> #include <cstdio> #include <cstdlib> #include <ctime> #define ATTN_THREADS 1024 #define MAX_REG_SIZE 8 #define minus_infinity -10000.0 void CheckCudaErrorAux(const char* file, unsigned line) { cudaError_t err = cudaGetLastError(); if (err == cudaSuccess) return; std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; throw std::runtime_error("CUDA ERROR!!!\n"); } #define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) namespace cg = cooperative_groups; __global__ void attn_softmax_v2(__half* vals, __half* mask, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, float scale, int iterations, int reduceWidth) { #if __CUDA_ARCH__ >= 700 cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float2 low_data[MAX_REG_SIZE]; float2 high_data[MAX_REG_SIZE]; __half2 h_scale = __float2half2_rn(scale); int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) : minus_infinity; low_data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? __half2float(vals[data_id + 1]) : minus_infinity; high_data[i].x = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? __half2float(vals[data_id + 2]) : minus_infinity; high_data[i].y = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? __half2float(vals[data_id + 3]) : minus_infinity; if (mask && recompute) { low_data[i].x += __half2float(mask[data_id + mask_offset]); low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); high_data[i].y += __half2float(mask[data_id + mask_offset + 3]); } } else { low_data[i].x = data_id > window_stride ? __half2float(vals[data_id]) : minus_infinity; low_data[i].y = (((!triangular || (data_id + 1) <= seq_id) && (data_id + 1) > window_stride) && (data_id + 1) < sequence_length) ? __half2float(vals[data_id + 1]) : minus_infinity; high_data[i].x = (((!triangular || (data_id + 2) <= seq_id) && (data_id + 2) > window_stride) && (data_id + 2) < sequence_length) ? 
__half2float(vals[data_id + 2]) : minus_infinity; high_data[i].y = minus_infinity; if (mask && recompute) { low_data[i].x += __half2float(mask[data_id + mask_offset]); if ((data_id + 1) < sequence_length) low_data[i].y += __half2float(mask[data_id + mask_offset + 1]); if ((data_id + 2) < sequence_length) high_data[i].x += __half2float(mask[data_id + mask_offset + 2]); } } // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); } else { low_data[i].x = minus_infinity; low_data[i].y = minus_infinity; high_data[i].x = minus_infinity; high_data[i].y = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { low_data[i].x = __expf(low_data[i].x - max_val); low_data[i].y = __expf(low_data[i].y - max_val); high_data[i].x = __expf(high_data[i].x - max_val); high_data[i].y = __expf(high_data[i].y - max_val); sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = low_data[i].x / sum; vals[data_id + 1] = low_data[i].y / sum; vals[data_id + 2] = high_data[i].x / sum; vals[data_id + 3] = high_data[i].y / sum; } else { vals[data_id] = low_data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = low_data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = high_data[i].x / sum; } } } } #endif } __global__ void attn_softmax_v2(float* vals, float* attn_mask, bool triangular, bool recompute, bool local_attention, int window_size, int total_count, int heads, int sequence_length, int num_seq, float scale, int iterations, int reduceWidth) { cg::thread_block b = cg::this_thread_block(); cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b); float4 data[MAX_REG_SIZE]; int wid = threadIdx.x >> 5; int lane = threadIdx.x & 0x1f; int warp_num = blockDim.x >> 5; int reduce_blocks = reduceWidth >> 5; int seq_lane = threadIdx.x % reduceWidth; __shared__ float partialSum[MAX_WARP_NUM]; int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); if (iter_offset < total_count) { vals += (iter_offset * sequence_length); int mask_offset = (iter_offset / (heads * num_seq)) * (sequence_length); int seq_id = iter_offset % num_seq; int seq_id4 = seq_id >> 2; int real_seq_id = seq_id + (num_seq == sequence_length ? 
0 : sequence_length); int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) ? (real_seq_id >> 2) - (window_size >> 2) : 0; int window_stride = (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; float max_val = minus_infinity; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if ((!triangular || ((data_id >> 2) <= seq_id4)) && (data_id >> 2) >= window_stride4 && data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { data[i].x = (data_id > window_stride ? vals[data_id] : minus_infinity); data[i].y = ((!triangular || ((data_id + 1) <= seq_id)) && (data_id + 1) > window_stride) ? vals[data_id + 1] : minus_infinity; data[i].z = ((!triangular || ((data_id + 2) <= seq_id)) && (data_id + 2) > window_stride) ? vals[data_id + 2] : minus_infinity; data[i].w = ((!triangular || ((data_id + 3) <= seq_id)) && (data_id + 3) > window_stride) ? vals[data_id + 3] : minus_infinity; if (attn_mask && recompute) { data[i].x += attn_mask[data_id + mask_offset]; data[i].y += attn_mask[data_id + mask_offset + 1]; data[i].z += attn_mask[data_id + mask_offset + 2]; data[i].w += attn_mask[data_id + mask_offset + 3]; } } else { data[i].x = data_id > window_stride ? vals[data_id] : minus_infinity; data[i].y = (((!triangular || (data_id + 1) <= seq_id)) && (data_id + 1) > window_stride && (data_id + 1) < sequence_length) ? (vals[data_id + 1]) : minus_infinity; data[i].z = (((!triangular || (data_id + 2) <= seq_id)) && (data_id + 2) > window_stride && (data_id + 2) < sequence_length) ? (vals[data_id + 2]) : minus_infinity; data[i].w = minus_infinity; if (attn_mask && recompute) { data[i].x += attn_mask[data_id + mask_offset]; if ((data_id + 1) < sequence_length) data[i].y += attn_mask[data_id + mask_offset + 1]; if ((data_id + 2) < sequence_length) data[i].z += attn_mask[data_id + mask_offset + 2]; } } max_val = (data[i].x > max_val ? data[i].x : max_val); max_val = (data[i].y > max_val ? data[i].y : max_val); max_val = (data[i].z > max_val ? data[i].z : max_val); max_val = (data[i].w > max_val ? data[i].w : max_val); } else { data[i].x = minus_infinity; data[i].y = minus_infinity; data[i].z = minus_infinity; data[i].w = minus_infinity; } } for (int i = 1; i < WARP_SIZE; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? temp : max_val); } if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = max_val; b.sync(); if (lane < warp_num) max_val = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { auto temp = g.shfl_xor(max_val, i); max_val = (temp > max_val ? 
temp : max_val); } max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); } float sum = 0; for (int i = 0; i < iterations; i++) { data[i].x = __expf(data[i].x - max_val); data[i].y = __expf(data[i].y - max_val); data[i].z = __expf(data[i].z - max_val); data[i].w = __expf(data[i].w - max_val); sum += (data[i].x + data[i].y + data[i].z + data[i].w); } for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); if (reduceWidth > WARP_SIZE) { if (lane == 0) partialSum[wid] = sum; b.sync(); if (lane < warp_num) sum = partialSum[lane]; b.sync(); for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } sum = g.shfl(sum, threadIdx.x / WARP_SIZE); } sum += 1e-6; for (int i = 0; i < iterations; i++) { int data_id = i * (reduceWidth << 2) + (seq_lane << 2); if (data_id < sequence_length) { if ((sequence_length - data_id) >= 4) { vals[data_id] = data[i].x / sum; vals[data_id + 1] = data[i].y / sum; vals[data_id + 2] = data[i].z / sum; vals[data_id + 3] = data[i].w / sum; } else { vals[data_id] = data[i].x / sum; if ((data_id + 1) < sequence_length) vals[data_id + 1] = data[i].y / sum; if ((data_id + 2) < sequence_length) vals[data_id + 2] = data[i].z / sum; } } } } } template <typename T> void launch_attn_softmax_v2(T* vals, T* mask, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, float scale, cudaStream_t stream) { int total_count = batch_size * heads * num_seq; dim3 grid_dim((total_count - 1) / (WARP_SIZE / ((sequence_length - 1) / ATTN_THREADS + 1)) + 1); dim3 block_dim(ATTN_THREADS); const int reduce_width = ((sequence_length - 1) / ATTN_THREADS + 1) * WARP_SIZE; const int iterations = (sequence_length - 1) / (reduce_width << 2) + 1; if (sequence_length <= 32768) attn_softmax_v2<<<grid_dim, block_dim, 0, stream>>>( vals, mask, triangular, recompute, local_attention, window_size, total_count, (triangular ? (heads * batch_size) : heads), sequence_length, num_seq, scale, iterations, reduce_width); else throw std::runtime_error("Unsupport Seq_Length!"); } template void launch_attn_softmax_v2(float* vals, float* mask, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, float scale, cudaStream_t stream); template void launch_attn_softmax_v2(__half* vals, __half* mask, bool triangular, bool recompute, bool local_attention, int window_size, int batch_size, int heads, int num_seq, int sequence_length, float scale, cudaStream_t stream);
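Both attn_softmax_v2 kernels above reduce the per-row maximum and the exp-sum with the same butterfly shuffle pattern inside a cooperative-groups warp tile. A self-contained sketch of that pattern for a single row of at most 32 values; the kernel name, launch shape, and sample data are ours.

#include <cooperative_groups.h>
#include <cuda_runtime.h>
#include <cstdio>

namespace cg = cooperative_groups;

__global__ void warp_softmax_demo(float* vals, int n)
{
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);

    int lane = threadIdx.x;
    float x = (lane < n) ? vals[lane] : -10000.0f;   // out-of-range lanes carry a "minus infinity" value

    // butterfly reduction: after log2(32) steps every lane holds the row maximum
    float max_val = x;
    for (int i = 1; i < 32; i *= 2) {
        float other = warp.shfl_xor(max_val, i);
        max_val = (other > max_val) ? other : max_val;
    }

    // same pattern for the sum of exp(x - max)
    float e = (lane < n) ? __expf(x - max_val) : 0.0f;
    float sum = e;
    for (int i = 1; i < 32; i *= 2)
        sum += warp.shfl_xor(sum, i);

    if (lane < n) vals[lane] = e / sum;   // normalized softmax entry
}

int main()
{
    const int n = 8;
    float h[n] = {1, 2, 3, 4, 4, 3, 2, 1};
    float* d = NULL;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    warp_softmax_demo<<<1, 32>>>(d, n);    // one warp handles one row
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", h[i]);
    cudaFree(d);
    return 0;
}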
23e3cfa185ac8709cab0c5ad34b4e28cf38dc031.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_copysign (const int n, const REAL* x, const int offset_x, const int stride_x, const REAL* y, const int offset_y, const int stride_y, REAL* z, const int offset_z, const int stride_z) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { z[offset_z + gid * stride_z] = CAST(copysign)(x[offset_x + gid * stride_x], y[offset_y + gid * stride_y]); } }
23e3cfa185ac8709cab0c5ad34b4e28cf38dc031.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_copysign (const int n, const REAL* x, const int offset_x, const int stride_x, const REAL* y, const int offset_y, const int stride_y, REAL* z, const int offset_z, const int stride_z) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { z[offset_z + gid * stride_z] = CAST(copysign)(x[offset_x + gid * stride_x], y[offset_y + gid * stride_y]); } }
34d47c6e252d0c28cdba99c46897174037399f34.hip
// !!! This is a file automatically generated by hipify!!! #include "ActionReconstructionApp.h" #include <cmath> #include <chrono> #include <thread> #include <time.h> #include <cinder/app/App.h> #include <cinder/app/RendererGl.h> #include <cinder/gl/gl.h> #include <cinder/CameraUi.h> #include <cinder/params/Params.h> #include <cinder/Log.h> #include <cinder/ObjLoader.h> #include <Utils.h> #include <InputConfig.h> #include <DataCamera.h> #include <InputDataLoader.h> #include <BackgroundWorker.h> #include <MeshReconstruction.h> #include <TransferFunctionEditor.h> #include <VolumeVisualization.h> #include <CoordinateTransformation.h> #include <cuPrintf.cuh> #include <tinyformat.h> #include <ObjectLoader.h> #include <GroundTruthToSdf.h> #include "resources/Resources.h" #include "Parameters.h" using namespace ci::app; using namespace std; using namespace ar; ActionReconstructionApp::ActionReconstructionApp() : currentState_(State::InspectScan) { worker_ = std::make_unique<ar3d::BackgroundWorker2>(); std::time_t result = std::time(nullptr); ci::log::makeLogger<ci::log::LoggerFile>(tinyformat::format("%sMainApp%s.log", getAppPath().string(), std::asctime(std::localtime(&result))), true); } void ActionReconstructionApp::setup() { CUMAT_SAFE_CALL(cudaPrintfInit()); //parameter ui, must happen before user-camera paramsGeneralInterface_ = ci::params::InterfaceGl::create(getWindow(), "General", toPixels(ci::ivec2(300, 250))); paramsInspectScanInterface_ = ci::params::InterfaceGl::create(getWindow(), "Inspect Scan", toPixels(ci::ivec2(300, 580))); paramsReconstructionInterface_ = ci::params::InterfaceGl::create(getWindow(), "Reconstruction", toPixels(ci::ivec2(300, 580))); paramsViewResultInterface_ = ci::params::InterfaceGl::create(getWindow(), "View Result", toPixels(ci::ivec2(300, 580))); paramsReconstructionInterface_->show(false); paramsViewResultInterface_->show(false); //transfer function editor tfe_ = std::make_unique<TransferFunctionEditor>(getWindow()); //volume visualization volumeVis_ = std::make_unique<ar3d::VolumeVisualization>(getWindow(), &camera_, &paramsGeneral_.volumeVisParams_); //user-camera camUi_ = ci::CameraUi(&camera_, getWindow()); ci::vec3 newEye = camera_.getEyePoint() + camera_.getViewDirection() * (camera_.getPivotDistance() * (1 - 0.2f)); camera_.setEyePoint(newEye); camera_.setPivotDistance(camera_.getPivotDistance() * 0.2f); //floor auto plane = cinder::geom::Plane().size(ci::vec2(4, 4)).subdivisions(ci::ivec2(5, 5)); vector<ci::gl::VboMesh::Layout> bufferLayout = { ci::gl::VboMesh::Layout().usage(GL_DYNAMIC_DRAW).attrib(cinder::geom::Attrib::POSITION, 3), ci::gl::VboMesh::Layout().usage(GL_STATIC_DRAW).attrib(cinder::geom::Attrib::TEX_COORD_0, 2) }; floorVboMesh_ = ci::gl::VboMesh::create(plane, bufferLayout); ci::gl::Texture::Format floorTextureFmt; floorTextureFmt.enableMipmapping(true); floorTextureFmt.setMinFilter(GL_LINEAR_MIPMAP_LINEAR); floorTexture_ = ci::gl::Texture::create(loadImage(loadResource(CHECKERBOARD_IMAGE)), floorTextureFmt); //ground truth shader groundTruthShader_ = ci::gl::GlslProg::create(ci::gl::GlslProg::Format() .vertex(R"GLSL( #version 150 uniform mat4 ciModelViewProjection; in vec4 ciPosition; in vec4 ciColor; out vec4 Color; void main(void) { gl_Position = ciModelViewProjection * ciPosition; Color = ciColor; } )GLSL") .fragment(R"GLSL( #version 150 in vec4 Color; out vec4 oColor; void main(void) { oColor = Color; } )GLSL" )); setupGeneral(); setupInspectScan(); setupReconstruction(); setupViewResult(); } void 
ActionReconstructionApp::setupGeneral() { paramsGeneralInterface_->addButton("Load", [this]() {this->load(); }); paramsGeneralInterface_->addButton("Save", [this]() {this->save(); }); paramsGeneralInterface_->addParam("Rendering-SaveFrameNames", &frameNames_).label("Frame Names"); paramsGeneralInterface_->addParam("Rendering-ExportWithNormals", &exportWithNormals_).group("Rendering").label("Export \\w normals + orig.pos."); paramsGeneral_.addParams(paramsGeneralInterface_); paramsGeneralInterface_->setPosition(glm::ivec2(5, 590)); } void ActionReconstructionApp::setupInspectScan() { paramsInspectScan_.addParams(paramsInspectScanInterface_); paramsInspectScanInterface_->addButton("InspectUseGroundTruth", [this]() {this->inspectScanUseGroundTruth(); }, "label='Use Ground Truth'"); paramsInspectScanInterface_->addButton("Next-1-2", [this]() {this->nextReconstruction(); }, "label='Next - Reconstruction'"); paramsInspectScanInterface_->setPosition(glm::ivec2(5, 5)); } void ActionReconstructionApp::setupReconstruction() { visualizePartialObservations_ = false; paramsReconstruction_.addParams(paramsReconstructionInterface_); paramsReconstructionInterface_->addButton("RecForwardStep", [this]() {this->reconstructionForwardStep(); }, "label='Forward Step' "); paramsReconstructionInterface_->addButton("RecReset", [this]() {this->reconstructionReset(); }, "label='Reset' key=r "); paramsReconstructionInterface_->addButton("RecSolve", [this]() {this->reconstructionSolve(); }, "label='Solve' key=Return "); paramsReconstructionInterface_->addButton("RecTest", [this]() {this->reconstructionTestGradient(); }, "label='Test Gradient' "); paramsReconstructionInterface_->addParam("RecPartObsVis", &visualizePartialObservations_).label("Visualize Observations"); paramsReconstructionInterface_->addButton("Prev-2-1", [this]() {this->prevInspectScan(); }, "label='Prev - Inspect Scan'"); paramsReconstructionInterface_->addButton("Next-2-3", [this]() {this->nextViewResult(); }, "label='Next - View Result'"); paramsReconstructionInterface_->setPosition(glm::ivec2(5, 5)); //plots typedef ar3d::AdjointSolver::InputVariables V; typedef ar3d::SoftBodySimulation3D::Settings S; costPlot = std::make_unique<GraphPlot>("Cost"); costPlot->setTrueValue(0); //plots.emplace_back(std::make_unique<GraphPlot>("GravityX"), [](const S& var) {return var.gravity_.x; }, [](const V& var) {return var.optimizeGravity_; }); plots.emplace_back(std::make_unique<GraphPlot>("GravityY"), [](const S& var) {return var.gravity_.y; }, [](const V& var) {return var.optimizeGravity_; }); //plots.emplace_back(std::make_unique<GraphPlot>("GravityZ"), [](const S& var) {return var.gravity_.z; }, [](const V& var) {return var.optimizeGravity_; }); plots.emplace_back(std::make_unique<GraphPlot>("Young's Modulus"), [](const S& var) {return var.youngsModulus_; }, [](const V& var) {return var.optimizeYoungsModulus_; }); plots.emplace_back(std::make_unique<GraphPlot>("Poisson Ratio"), [](const S& var) {return var.poissonsRatio_; }, [](const V& var) {return var.optimizePoissonRatio_; }); plots.emplace_back(std::make_unique<GraphPlot>("Mass"), [](const S& var) {return var.mass_; }, [](const V& var) {return var.optimizeMass_; }); plots.emplace_back(std::make_unique<GraphPlot>("Mass Damping"), [](const S& var) {return var.dampingAlpha_; }, [](const V& var) {return var.optimizeMassDamping_; }); plots.emplace_back(std::make_unique<GraphPlot>("Stiffness Damping"), [](const S& var) {return var.dampingBeta_; }, [](const V& var) {return var.optimizeStiffnessDamping_; }); 
plots.emplace_back(std::make_unique<GraphPlot>("LinearVelocityX"), [](const S& var) {return var.initialLinearVelocity_.x; }, [](const V& var) {return var.optimizeInitialLinearVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("LinearVelocityY"), [](const S& var) {return var.initialLinearVelocity_.y; }, [](const V& var) {return var.optimizeInitialLinearVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("LinearVelocityZ"), [](const S& var) {return var.initialLinearVelocity_.z; }, [](const V& var) {return var.optimizeInitialLinearVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("AngularVelocityX"), [](const S& var) {return var.initialAngularVelocity_.x; }, [](const V& var) {return var.optimizeInitialAngularVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("AngularVelocityY"), [](const S& var) {return var.initialAngularVelocity_.y; }, [](const V& var) {return var.optimizeInitialAngularVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("AngularVelocityZ"), [](const S& var) {return var.initialAngularVelocity_.z; }, [](const V& var) {return var.optimizeInitialAngularVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("Ground Plane Theta"), [](const S& var) {return ar3d::CoordinateTransformation::cartesian2spherical(var.groundPlane_).y; }, [](const V& var) {return var.optimizeGroundPlane_; }); plots.emplace_back(std::make_unique<GraphPlot>("Ground Plane Phi"), [](const S& var) {return ar3d::CoordinateTransformation::cartesian2spherical(var.groundPlane_).z; }, [](const V& var) {return var.optimizeGroundPlane_; }); plots.emplace_back(std::make_unique<GraphPlot>("Ground Plane Height"), [](const S& var) {return var.groundPlane_.w; }, [](const V& var) {return var.optimizeGroundPlane_; }); } void ActionReconstructionApp::setupViewResult() { paramsViewResult_.addParams(paramsViewResultInterface_); paramsViewResultInterface_->addButton("Prev-3-2", [this]() {this->prevReconstruction(); }, "label='Prev - Reconstruction'"); paramsViewResultInterface_->setPosition(glm::ivec2(5, 5)); } void ActionReconstructionApp::keyDown(KeyEvent event) { App::keyDown(event); if (event.getChar() == 'f') { // Toggle full screen when the user presses the 'f' key. setFullScreen(!isFullScreen()); } else if (event.getCode() == KeyEvent::KEY_ESCAPE) { // Exit full screen, or quit the application, when the user presses the ESC key. if (isFullScreen()) setFullScreen(false); else quit(); } else if (event.getChar() == 'p') { //Screenshot ci::Surface surface = copyWindowSurface(); //construct filename time_t now = time(NULL); struct tm tstruct; char buf[100]; localtime_s(&tstruct, &now); strftime(buf, sizeof(buf), "%d-%m-%Y_%H-%M-%S", &tstruct); string fileName = string("screenshot-") + string(buf) + ".png"; //write out ci::writeImage(fileName, surface); } else if (event.getChar() == 'l') { //reload resources, shaders, ... 
volumeVis_->reloadResources(); } else if (event.getCode() == KeyEvent::KEY_SPACE) { spaceBarPressed_ = true; } } void ActionReconstructionApp::keyUp(KeyEvent event) { App::keyUp(event); if (event.getCode() == KeyEvent::KEY_SPACE && spaceBarPressed_) { spaceBarPressed_ = false; } } void ActionReconstructionApp::mouseDown( MouseEvent event ) { App::mouseDown(event); } void ActionReconstructionApp::update() { //save animation from previous frame if (animationTakeScreenshot_ && animationRendered_ && !frameNames_.empty()) { //Screenshot cinder::Surface surface = copyWindowSurface(); string fileName = tinyformat::format("../screenshots/%s%05d.png", frameNames_.c_str(), frameCounter_); writeImage(fileName, surface); CI_LOG_I("screenshot saved to " << fileName); ////SDF file //if (gridSimulation_) { // string fileName = tinyformat::format("../screenshots/%s%05d.sdf", frameNames_.c_str(), frameCounter_); // volumeVis_->saveSdf(fileName); //} //Marching Cubes mesh file if (gridSimulation_) { string fileName = tinyformat::format("../screenshots/%s%05d.obj", frameNames_.c_str(), frameCounter_); volumeVis_->saveMCMesh(fileName); } //High Resolution mesh file if (gridSimulation_) { string fileName = tinyformat::format("../screenshots/%s_high%05d.obj", frameNames_.c_str(), frameCounter_); volumeVis_->saveHighResultMesh(fileName, exportWithNormals_, exportWithNormals_); } //done } if (animationTakeScreenshot_ && animationRendered_) { animationRendered_ = false; animationTakeScreenshot_ = false; } switch (currentState_) { case State::InspectScan: updateInspectScan(); break; case State::Reconstruction: updateReconstruction(); break; case State::ViewResult: updateViewResult(); break; } //update transfer function editor tfe_->setVisible(volumeVis_->needsTransferFunction()); tfe_->update(); } void ActionReconstructionApp::updateInspectScan() { if (!worker_->isDone()) return; int hasChanges = -1; static ar3d::InputConfigPtr oldConfigPtr = nullptr; static int oldFrame = -1; static ar3d::MeshReconstruction::Settings oldMeshReconstructionSettings; //check, if the input file has changed const ar3d::InputConfigPtr config = paramsInspectScan_.getConfig(); if (!config) return; if (oldConfigPtr != config) { oldConfigPtr = paramsInspectScan_.getConfig(); //create data loader dataLoader_ = std::make_shared<ar3d::InputDataLoader>(paramsInspectScan_.getConfig()); hasChanges = 0; //reset oldFrame = -1; memset(&oldMeshReconstructionSettings, 0, sizeof(ar3d::MeshReconstruction::Settings)); groundTruthVboMesh_ = nullptr; frameData_ = nullptr; meshReconstruction_ = nullptr; } //check if the frame has changed const int frame = paramsInspectScan_.frame_; if (oldFrame != frame) { oldFrame = paramsInspectScan_.frame_; groundTruthVboMesh_ = nullptr; hasChanges = 0; } //check if the reconstruction settings have changed const ar3d::MeshReconstruction::Settings settings = paramsInspectScan_.meshReconstructionSettings.getSettings(); if (memcmp(&oldMeshReconstructionSettings, &settings, sizeof(ar3d::MeshReconstruction::Settings)) != 0) { oldMeshReconstructionSettings = paramsInspectScan_.meshReconstructionSettings.getSettings(); if (hasChanges == -1) hasChanges = 0; } //load in a background thread if (hasChanges >= 0) { //declare background task std::function<void(ar3d::BackgroundWorker2*)> task = [this, hasChanges, frame, settings, config] (ar3d::BackgroundWorker2* worker) { if (hasChanges <= 0) { //These steps only depend on the frame worker->setStatus("Load color and depth images"); auto frameDataTmp = 
this->dataLoader_->loadFrame(frame, worker); if (worker->isInterrupted()) return; this->frameData_ = frameDataTmp; worker->setStatus("Initialize mesh reconstruction"); auto meshReconstructionTmp = make_shared<ar3d::MeshReconstruction>(config, this->frameData_); if (worker->isInterrupted()) return; this->meshReconstruction_ = meshReconstructionTmp; worker->setStatus("Segment images"); this->meshReconstruction_->runSegmentation(worker); if (worker->isInterrupted()) return; worker->setStatus("Find bounding box"); this->meshReconstruction_->runBoundingBoxExtraction(worker); if (worker->isInterrupted()) return; } if (hasChanges <= 1) { //These depend on MeshReconstruction::Settings this->meshReconstruction_->settings = settings; worker->setStatus("Initialize 3D reconstruction"); this->meshReconstruction_->runInitReconstruction(worker); if (worker->isInterrupted()) return; worker->setStatus("Optimize 3D reconstruction"); this->meshReconstruction_->runReconstruction(worker); if (worker->isInterrupted()) return; worker->setStatus("Recover full signed distance function"); this->meshReconstruction_->runSignedDistanceReconstruction(worker); if (worker->isInterrupted()) return; referenceSdf_ = meshReconstruction_->getSdfData(); } frameCounter_ = frame; animationTakeScreenshot_ = true; }; //start background worker worker_->launch(task); CI_LOG_I("Background worker started from section " << hasChanges); } } void ActionReconstructionApp::inspectScanUseGroundTruth() { if (!worker_->isDone()) return; if (!paramsInspectScan_.getConfig()->groundTruthMeshes && !paramsInspectScan_.getConfig()->groundTruth) { CI_LOG_W("input dataset does not contain ground thruth data"); return; } std::function<void(ar3d::BackgroundWorker2*)> task = [this](ar3d::BackgroundWorker2* worker) { worker->setStatus("Convert ground truth to SDF"); int frame = paramsInspectScan_.frame_; int resolution = paramsInspectScan_.meshReconstructionSettings.getSettings().gridResolution; referenceSdf_ = ar3d::groundTruthToSdf(paramsInspectScan_.getConfig(), frame, resolution); }; worker_->launch(task); } void ActionReconstructionApp::updateReconstruction() { static ar3d::WorldGridRealDataPtr oldSdf = nullptr; static ar3d::SoftBodySimulation3D::InputSdfSettings oldInputSettings; static ar3d::SoftBodySimulation3D::Settings oldElasticitySettings; if (!worker_->isDone()) return; if (oldSdf != referenceSdf_ || oldInputSettings != paramsReconstruction_.inputSettings_.getSettings()) { //input changed oldSdf = referenceSdf_; oldInputSettings = paramsReconstruction_.inputSettings_.getSettings(); oldElasticitySettings = paramsReconstruction_.simulationSettings_.getSettings(); ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { gridSimulation_.reset(); worker->setStatus("Create Input"); ar3d::SoftBodyGrid3D::Input input = ar3d::SoftBodyGrid3D::createFromSdf( this->paramsReconstruction_.inputSettings_.getSettings(), this->referenceSdf_); this->gridSimulation_ = std::make_unique<ar3d::SoftBodyGrid3D>(input); this->gridSimulation_->setSettings(paramsReconstruction_.simulationSettings_.getSettings()); this->gridSimulation_->setRecordTimings(true); worker->setStatus("Prepare rendering"); this->volumeVis_->setInput(this->gridSimulation_->getInput()); //results->input_ = gridSimulation->getInput(); frameCounter_ = 0; frameData_ = nullptr; }; worker_->launch(task); } if (!worker_->isDone()) return; if (oldElasticitySettings != paramsReconstruction_.simulationSettings_.getSettings()) { //elasticity settings changed oldElasticitySettings = 
paramsReconstruction_.simulationSettings_.getSettings(); ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { if (gridSimulation_) { gridSimulation_->setSettings(paramsReconstruction_.simulationSettings_.getSettings()); //results->settings_ = simulationSettings_.getSettings(); } }; worker_->launch(task); } if (spaceBarPressed_ && !animationTakeScreenshot_) //space bar pressed and current frame saved reconstructionForwardStep(); } void ActionReconstructionApp::reconstructionForwardStep() { if (worker_->isDone() && !animationTakeScreenshot_) { ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { gridSimulation_->solve(true, worker, true); volumeVis_->update(gridSimulation_->getState()); worker->setStatus("Prepare partial observation visualization"); frameData_ = dataLoader_->loadFrame(frameCounter_/(paramsReconstruction_.costIntermediateSteps_+1), worker); if (visualizePartialObservations_) { observationVis_.setObservation( paramsInspectScan_.getConfig()->cameras[0].camera, frameData_->cameraImages[0].depthMatrix, gridSimulation_->getInput(), gridSimulation_->getState()); } frameCounter_++; animationTakeScreenshot_ = true; animationRendered_ = false; }; worker_->launch(task); } } void ActionReconstructionApp::reconstructionResetPlots() { costPlot->clear(); for (const auto& e : plots) { std::get<0>(e)->clear(); } } void ActionReconstructionApp::reconstructionReset() { CI_LOG_I("Reset simulation"); //wait for the current task if (worker_) { worker_->interrupt(); worker_->wait(); } //reset simulation frameCounter_ = 0; frameData_ = nullptr; if (gridSimulation_) { gridSimulation_->reset(); volumeVis_->update(gridSimulation_->getState()); } //reset plots reconstructionResetPlots(); } void ActionReconstructionApp::reconstructionSolve() { if (worker_->isDone()) { //setup plots reconstructionResetPlots(); costPlot->setMaxPoints(paramsReconstruction_.adjointSettings_.getSettings().numIterations_+1); for (const auto& e : plots) { std::get<0>(e)->setMaxPoints(paramsReconstruction_.adjointSettings_.getSettings().numIterations_+1); std::get<0>(e)->setTrueValue(std::get<1>(e)(paramsReconstruction_.simulationSettings_.getSettings())); std::get<0>(e)->addPoint(std::get<1>(e)(paramsReconstruction_.simulationSettings_.getSettings())); } //declare worker ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { worker->setStatus("Solve: Prepare input and settings"); ar3d::real timestep = 1.0 / (paramsInspectScan_.getConfig()->framerate * (paramsReconstruction_.costIntermediateSteps_ + 1)); ar3d::SimulationResults3DPtr results = std::make_shared<ar3d::SimulationResults3D>(); results->input_ = gridSimulation_->getInput(); results->settings_ = gridSimulation_->getSettings(); results->settings_.timestep_ = timestep; worker->setStatus("Solve: Load observations"); //for now, use all cameras std::vector<ar3d::real> timestepWeights; ar3d::CostFunctionPartialObservations::Observations observations; observations.gpuEvaluate_ = true; observations.maxSdf_ = 5; observations.noise_ = 0; //not needed because we already have the observations const auto& inputConfig = paramsInspectScan_.getConfig(); observations.numCameras_ = inputConfig->cameras.size(); observations.cameras_.resize(inputConfig->cameras.size()); for (size_t i = 0; i < inputConfig->cameras.size(); ++i) { observations.cameras_[i] = inputConfig->cameras[i].camera; } int numSteps = paramsReconstruction_.costNumSteps_ == 0 ? 
inputConfig->duration-1 : paramsReconstruction_.costNumSteps_; for (int i = 1; i <= numSteps; ++i) { worker->setStatus(tinyformat::format("Solve: Load observation %d / %d", i, numSteps)); //add in-between frames without weight for (int j = 0; j < paramsReconstruction_.costIntermediateSteps_; ++j) { timestepWeights.push_back(0); observations.observations_.emplace_back(); } //load camera images and copy them to the GPU timestepWeights.push_back(1); ar3d::CostFunctionPartialObservations::Observation observation; observation.resize(inputConfig->cameras.size()); const auto dataFrame = dataLoader_->loadFrame(i, worker); for (size_t j = 0; j < inputConfig->cameras.size(); ++j) { Eigen::Matrix<ar3d::real, Eigen::Dynamic, Eigen::Dynamic> host = dataFrame->cameraImages[j].depthMatrix.cast<ar3d::real>().matrix(); observation[j] = ar3d::CostFunctionPartialObservations::Image::fromEigen(host); } observations.observations_.push_back(observation); } assert(timestepWeights.size() == observations.observations_.size()); assert(timestepWeights.size() > 0); ar3d::CostFunctionPtr costFunction = std::make_shared<ar3d::CostFunctionPartialObservations>(timestepWeights, observations); worker->setStatus("Solve: Create AdjointSolver"); ar3d::AdjointSolver::Settings adjointSettings = paramsReconstruction_.adjointSettings_.getSettings(); adjointSettings.variables_.currentGravity_ = results->settings_.gravity_; adjointSettings.variables_.currentGroundPlane_ = results->settings_.groundPlane_; adjointSettings.variables_.currentMassDamping_ = results->settings_.dampingAlpha_; adjointSettings.variables_.currentMass_ = results->settings_.mass_; adjointSettings.variables_.currentPoissonRatio_ = results->settings_.poissonsRatio_; adjointSettings.variables_.currentStiffnessDamping_ = results->settings_.dampingBeta_; adjointSettings.variables_.currentYoungsModulus_ = results->settings_.youngsModulus_; ar3d::AdjointSolver solver(results, adjointSettings, costFunction); worker->setStatus("Solve: Solve it!"); std::vector<ar3d::SoftBodySimulation3D::Settings> values; std::vector<ar3d::SoftBodySimulation3D::Settings> gradients; values.push_back(gridSimulation_->getSettings()); {ar3d::SoftBodySimulation3D::Settings initialGrad; memset(&initialGrad, 0, sizeof(ar3d::SoftBodySimulation3D::Settings)); gradients.push_back(initialGrad); } ar3d::AdjointSolver::Callback_t callback = [this, &values, &gradients](const ar3d::SoftBodySimulation3D::Settings& var, const ar3d::SoftBodySimulation3D::Settings& grad, ar3d::real cost) { CI_LOG_I(var); CI_LOG_I(ar3d::CoordinateTransformation::cartesian2spherical(glm::double3(var.groundPlane_.x, var.groundPlane_.y, var.groundPlane_.z))); if (costPlot->getNumPoints()==0) costPlot->addPoint(cost); costPlot->addPoint(cost); for (const auto& e : plots) std::get<0>(e)->addPoint(std::get<1>(e)(var)); values.push_back(var); gradients.push_back(grad); }; solver.solve(callback, worker); //Done! 
Print steps std::stringstream ss; ss << "\nCost "; for (const auto& e : plots) { if (std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) ss << std::get<0>(e)->getName() << " (gradient) "; } ss << std::endl; for (int i = 1; i < values.size(); ++i) { ss << std::fixed << std::setw(12) << std::setprecision(7) << costPlot->getPoint(i) << " "; for (const auto& e : plots) { if (std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) { ss << std::fixed << std::setw(12) << std::setprecision(7) << std::get<1>(e)(values[i-1]) << " (" << std::fixed << std::setw(12) << std::setprecision(7) << std::get<1>(e)(gradients[i]) << ") "; } } ss << std::endl; } CI_LOG_I(ss.str()); }; worker_->launch(task); } } void ActionReconstructionApp::reconstructionTestGradient() { if (worker_->isDone()) { ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { worker->setStatus("Solve: Prepare input and settings"); ar3d::real timestep = 1.0 / (paramsInspectScan_.getConfig()->framerate * (paramsReconstruction_.costIntermediateSteps_ + 1)); ar3d::SimulationResults3DPtr results = std::make_shared<ar3d::SimulationResults3D>(); results->input_ = gridSimulation_->getInput(); results->settings_ = gridSimulation_->getSettings(); results->settings_.timestep_ = timestep; worker->setStatus("Solve: Load observations"); //for now, use all cameras std::vector<ar3d::real> timestepWeights; ar3d::CostFunctionPartialObservations::Observations observations; observations.noise_ = 0; //not needed because we already have the observations const auto& inputConfig = paramsInspectScan_.getConfig(); observations.numCameras_ = inputConfig->cameras.size(); observations.cameras_.resize(inputConfig->cameras.size()); for (size_t i = 0; i < inputConfig->cameras.size(); ++i) { observations.cameras_[i] = inputConfig->cameras[i].camera; } int numSteps = paramsReconstruction_.costNumSteps_ == 0 ? 
inputConfig->duration : paramsReconstruction_.costNumSteps_; for (int i = 0; i < numSteps; ++i) { worker->setStatus(tinyformat::format("Solve: Load observation %d / %d", (i + 1), numSteps)); //load camera images and copy them to the GPU timestepWeights.push_back(1); ar3d::CostFunctionPartialObservations::Observation observation; observation.resize(inputConfig->cameras.size()); const auto dataFrame = dataLoader_->loadFrame(i, worker); for (size_t j = 0; j < inputConfig->cameras.size(); ++j) { Eigen::Matrix<ar3d::real, Eigen::Dynamic, Eigen::Dynamic> host = dataFrame->cameraImages[j].depthMatrix.cast<ar3d::real>().matrix(); observation[j] = ar3d::CostFunctionPartialObservations::Image::fromEigen(host); } observations.observations_.push_back(observation); //add in-between frames without weight if (i < numSteps - 1) { for (int j = 0; j < paramsReconstruction_.costIntermediateSteps_; ++j) { timestepWeights.push_back(0); observations.observations_.emplace_back(); } } } assert(timestepWeights.size() == observations.observations_.size()); assert(timestepWeights.size() > 0); ar3d::CostFunctionPtr costFunction = std::make_shared<ar3d::CostFunctionPartialObservations>(timestepWeights, observations); worker->setStatus("Solve: Create AdjointSolver"); ar3d::AdjointSolver::Settings adjointSettings = paramsReconstruction_.adjointSettings_.getSettings(); adjointSettings.variables_.currentGravity_ = results->settings_.gravity_; adjointSettings.variables_.currentGroundPlane_ = results->settings_.groundPlane_; adjointSettings.variables_.currentMassDamping_ = results->settings_.dampingAlpha_; adjointSettings.variables_.currentMass_ = results->settings_.mass_; adjointSettings.variables_.currentPoissonRatio_ = results->settings_.poissonsRatio_; adjointSettings.variables_.currentStiffnessDamping_ = results->settings_.dampingBeta_; adjointSettings.variables_.currentYoungsModulus_ = results->settings_.youngsModulus_; ar3d::AdjointSolver solver(results, adjointSettings, costFunction); solver.testGradient(worker); }; worker_->launch(task); } } void ActionReconstructionApp::updateViewResult() { } void ActionReconstructionApp::draw() { using namespace ar::utils; if (paramsGeneral_.printMode_) cinder::gl::clear(cinder::Color(1, 1, 1)); else cinder::gl::clear(cinder::Color(0, 0, 0)); // WORLD SPACE ci::gl::enableDepthRead(); ci::gl::enableDepthWrite(); ci::gl::setMatrices(camera_); switch (currentState_) { case State::InspectScan: drawInspectScan(); break; case State::Reconstruction: drawReconstruction(); break; case State::ViewResult: drawViewResult(); break; } // WINDOW SPACE ci::gl::disableDepthRead(); ci::gl::disableDepthWrite(); ci::gl::setMatricesWindow(getWindowSize(), true); // Draw the background worker's status if (worker_ && !worker_->isDone()) { //draw waiting animation { ci::gl::ScopedModelMatrix scopedMatrix; ci::gl::ScopedColor scopedColor; ci::gl::translate(25, getWindowHeight() - 50); int step; double dummy; step = static_cast<int>(std::modf(getElapsedSeconds(), &dummy) * 8); for (int i = 0; i < 8; ++i) { float c = ((i + step)%8) / 7.0f; ci::gl::color(c, c, c); ci::gl::drawSolidRoundedRect(ci::Rectf(5, -2, 15, 2), 2); ci::gl::rotate(-2.0f * M_PI / 8.0f); } } //draw status cinder::gl::drawString(worker_->getStatus(), glm::vec2(50, getWindowHeight() - 50), paramsGeneral_.printMode_ ? 
cinder::ColorA(0, 0, 0) : cinder::ColorA(1, 1, 1)); } // Draw the interface paramsGeneralInterface_->draw(); paramsInspectScanInterface_->draw(); paramsReconstructionInterface_->draw(); paramsViewResultInterface_->draw(); tfe_->draw(); } void ActionReconstructionApp::drawGroundTruth(int frame) { if (frame >= paramsInspectScan_.getConfig()->duration) return; if (paramsInspectScan_.getConfig()->groundTruth) { //ground truth is a ball ci::gl::ScopedGlslProg glslScope(ci::gl::getStockShader(ci::gl::ShaderDef().texture())); ci::gl::ScopedTextureBind texScope(floorTexture_); ci::gl::ScopedModelMatrix scopedMatrix; const ar3d::InputGroundTruth& groundTruth = *(paramsInspectScan_.getConfig()->groundTruth); ci::gl::translate(ar::utils::toGLM(groundTruth.locations[frame])); ci::gl::rotate(ar::utils::toGLM(groundTruth.rotations[frame])); ci::gl::drawSphere(ci::vec3(0, 0, 0), groundTruth.radius, 16); } else if (paramsInspectScan_.getConfig()->groundTruthMeshes) { static int oldFrame = -1; if (oldFrame != frame) { oldFrame = frame; groundTruthVboMesh_ = nullptr; } //ground truth is a mesh if (!groundTruthVboMesh_) { //ObjLoader loader(loadFile(path + "/groundTruth/frame" + std::to_string(frame) + ".obj")); //groundTruthVboMesh = ci::gl::VboMesh::create(loader); ci::TriMeshRef triMesh = ObjectLoader::loadCustomObj(paramsInspectScan_.getConfig()->getPathToGroundTruth(frame)); groundTruthVboMesh_ = ci::gl::VboMesh::create(*triMesh); } ci::gl::ScopedGlslProg glslScope(groundTruthShader_); ci::gl::ScopedModelMatrix scopedMatrix; ci::gl::draw(groundTruthVboMesh_); } } void ActionReconstructionApp::drawProjCameraPoints(int frame) { if (frame >= paramsInspectScan_.getConfig()->duration) return; static int oldFrame = -1; if (oldFrame != frame) { oldFrame = frame; projCamPoints_ = this->meshReconstruction_->getProjectedPoints(paramsInspectScan_.getConfig(), dataLoader_->loadFrame(frame)); } ci::gl::ScopedColor col(1, 0, 1); //pointsBatch_->draw(); for (const ci::vec3& p : projCamPoints_) { ci::gl::drawCube(p, ci::vec3(0.001, 0.001, 0.001)); } } void ActionReconstructionApp::drawCameras(int frame) { if (frameData_ == nullptr && frame < paramsInspectScan_.getConfig()->duration && dataLoader_ != nullptr && (paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Color || paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Depth) && worker_->isDone()) { //load frame data ar3d::BackgroundWorker2::task task = [this, frame](ar3d::BackgroundWorker2* worker) { auto frameDataTmp = this->dataLoader_->loadFrame(frame, worker); if (worker->isInterrupted()) return; this->frameData_ = frameDataTmp; }; worker_->launch(task); } for (size_t i = 0; i < paramsInspectScan_.getConfig()->cameras.size(); ++i) { const ar3d::DataCamera& c = paramsInspectScan_.getConfig()->cameras[i].camera; //optional: show current frame if (frameData_ != nullptr) { ci::gl::ScopedModelMatrix scopedMatrix; ci::gl::multModelMatrix(c.invViewProjMatrix); //ci::gl::translate(0, 0, -1); ci::gl::translate(0, 0, paramsInspectScan_.getConfig()->viewCameraImageTranslation); ci::gl::scale(1, -1, 1); if (paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Color) { ci::gl::draw(frameData_->cameraImages[i].getColorTexture(), ci::Rectf(-1, -1, 1, 1)); } else if (paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Depth) { ci::gl::draw(frameData_->cameraImages[i].getDepthTexture(), ci::Rectf(-1, -1, 1, 1)); } } //control points ci::gl::ScopedColor scopedColor; ci::gl::color(0.8, 0.2, 0.2); double dist = 
paramsInspectScan_.getConfig()->viewCameraNearPlane; // = 0.0; //Near plane ci::gl::drawSphere(c.location, 0.03f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(0, 0, dist)), 0.02f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(1, 0, dist)), 0.02f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(0, 1, dist)), 0.02f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(1, 1, dist)), 0.02f, 16); //lines ci::gl::color(0.8, 0.4, 0.4); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(0, 0, dist))); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(1, 0, dist))); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(0, 1, dist))); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(1, 1, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(0, 0, dist)), c.getWorldCoordinates(glm::vec3(1, 0, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(0, 0, dist)), c.getWorldCoordinates(glm::vec3(0, 1, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(1, 1, dist)), c.getWorldCoordinates(glm::vec3(1, 0, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(1, 1, dist)), c.getWorldCoordinates(glm::vec3(0, 1, dist))); } } void ActionReconstructionApp::drawInspectScan() { if (paramsInspectScan_.getConfig() == nullptr) return; //WORLD SPACE //if (/*showFloor*/ true) { // ci::gl::ScopedGlslProg glslScope(ci::gl::getStockShader(ci::gl::ShaderDef().texture())); // ci::gl::ScopedTextureBind texScope(floorTexture_); // ci::gl::draw(floorVboMesh_); //} if (paramsGeneral_.showGroundTruth_) { drawGroundTruth(paramsInspectScan_.frame_); } if (paramsGeneral_.showCameraMode_!=ParamsGeneral::ShowCameraMode::Off) { drawCameras(paramsInspectScan_.frame_); } if (paramsGeneral_.showBoundingBox_ && meshReconstruction_) { ci::gl::ScopedColor scopedColor; //draw the camera pyramids ci::gl::color(0.5, 0.5, 1.0); for (const ar::geom3d::Pyramid& p : meshReconstruction_->getMaskPyramids()) { for (const auto& v : p.edgeRays) { ci::gl::drawLine(ar::utils::toGLM(p.center), ar::utils::toGLM((v * 5 + p.center).eval())); } } if (meshReconstruction_->getBoundingBox().isValid()) { //draw the bounding box ci::gl::color(0.2, 0.2, 1.0); drawWireCube(meshReconstruction_->getBoundingBox().min, meshReconstruction_->getBoundingBox().max); } } if (meshReconstruction_) { ar3d::WorldGridDoubleDataPtr sdf = referenceSdf_;//meshReconstruction_->getSdfData(); if (sdf) { //visualizate the SDF ar3d::WorldGridPtr grid = meshReconstruction_->getWorldGrid(); ci::gl::Texture3dRef tex = sdf->getTexture(ar3d::WorldGridData<double>::DataSource::HOST); //update volume vis and render ar3d::SoftBodyGrid3D::Input input; input.grid_ = grid; input.referenceSdf_ = sdf; volumeVis_->setInput(input); volumeVis_->setTransferFunction(tfe_->getTexture(), tfe_->getRangeMin(), tfe_->getRangeMax()); volumeVis_->draw(); if (animationTakeScreenshot_) animationRendered_ = true; } } if (paramsGeneral_.viewPoints_) { drawProjCameraPoints(paramsInspectScan_.frame_); } } void ActionReconstructionApp::drawReconstruction() { if (!gridSimulation_) return; //WORLD SPACE //ground plane if (paramsReconstruction_.simulationSettings_.getSettings().enableCollision_) { cinder::gl::ScopedModelMatrix m; glm::vec3 ref(0, 1, 0); glm::vec3 n( paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.x, paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.y, paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.z); cinder::gl::rotate(acos(dot(ref, n)), cross(ref, n)); 
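// align the reference up-axis (0,1,0) with the estimated ground-plane normal n (rotation above), then offset along that rotated axis by the plane height groundPlane_.w (translation below) before drawing the textured floor quad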
cinder::gl::translate(0, paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.w, 0); cinder::gl::ScopedGlslProg glslScope(cinder::gl::getStockShader(cinder::gl::ShaderDef().texture())); cinder::gl::ScopedTextureBind texScope(floorTexture_); cinder::gl::draw(floorVboMesh_); } //dirichlet boundaries { cinder::gl::ScopedColor c; cinder::gl::color(0, 0, 1, 1); if (paramsReconstruction_.inputSettings_.getSettings().enableDirichlet) { drawWireCube(paramsReconstruction_.inputSettings_.getSettings().centerDirichlet - paramsReconstruction_.inputSettings_.getSettings().halfsizeDirichlet, paramsReconstruction_.inputSettings_.getSettings().centerDirichlet + paramsReconstruction_.inputSettings_.getSettings().halfsizeDirichlet); } } //reference grid bounds if (paramsGeneral_.showBoundingBox_) { ar3d::WorldGridPtr grid = gridSimulation_->getState().advectedSDF_ != nullptr ? gridSimulation_->getState().advectedSDF_->getGrid() : gridSimulation_->getInput().grid_; cinder::gl::ScopedColor scopedColor; cinder::gl::color(0.2, 1.0, 0.2); Eigen::Vector3d voxelSize(grid->getVoxelSize(), grid->getVoxelSize(), grid->getVoxelSize()); Eigen::Vector3d minCorner = (grid->getOffset()).cast<double>().array() * voxelSize.array(); Eigen::Vector3d maxCorner = (grid->getOffset() + grid->getSize() + Eigen::Vector3i(1, 1, 1)).cast<double>().array() * voxelSize.array(); drawWireCube(minCorner, maxCorner); } //advected grid bounds if (paramsGeneral_.showBoundingBox_) { cinder::gl::ScopedColor scopedColor; cinder::gl::color(1.0, 0.2, 0.2); const Eigen::Vector3d& minCorner = gridSimulation_->getState().advectedBoundingBox_.min; const Eigen::Vector3d& maxCorner = gridSimulation_->getState().advectedBoundingBox_.max; drawWireCube(minCorner, maxCorner); } if (paramsGeneral_.showGroundTruth_) { drawGroundTruth(frameCounter_); } if (paramsGeneral_.showCameraMode_ != ParamsGeneral::ShowCameraMode::Off) { drawCameras(frameCounter_); } //partial observation vis if (visualizePartialObservations_) { observationVis_.draw(); } //main vis volumeVis_->setTransferFunction(tfe_->getTexture(), tfe_->getRangeMin(), tfe_->getRangeMax()); volumeVis_->draw(); if (animationTakeScreenshot_) animationRendered_ = true; //points if (paramsGeneral_.viewPoints_) { drawProjCameraPoints(frameCounter_/(paramsReconstruction_.costIntermediateSteps_+1)); } // WINDOW SPACE cinder::gl::disableDepthRead(); cinder::gl::disableDepthWrite(); cinder::gl::setMatricesWindow(getWindowSize(), true); //draw plots { const int offset = 10; const int width = 0.2 * getWindowWidth(); int numPlots = 1; for (const auto& e : plots) if (std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) numPlots++; numPlots = ::max(4, numPlots); const int height = getWindowHeight() / numPlots; costPlot->setBoundingRect(cinder::Rectf(getWindowWidth() - width, offset, getWindowWidth() - offset, height - offset)); costPlot->setPrintMode(paramsGeneral_.printMode_); costPlot->draw(); int y = 1; for (const auto& e : plots) { if (!std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) continue; std::get<0>(e)->setBoundingRect(cinder::Rectf(getWindowWidth() - width, y * height + offset, getWindowWidth() - offset, (y + 1) * height - offset)); std::get<0>(e)->setPrintMode(paramsGeneral_.printMode_); std::get<0>(e)->draw(); y++; } } } void ActionReconstructionApp::drawViewResult() { } void ActionReconstructionApp::nextReconstruction() { //check if we are ready to continue if (!worker_->isDone() || !meshReconstruction_) { CI_LOG_E("Input 
Reconstruction not completed yet"); return; } //pass the reference SDF to the next stage auto s = paramsReconstruction_.simulationSettings_.getSettings(); s.timestep_ = 1.0 / paramsInspectScan_.getConfig()->framerate; paramsReconstruction_.simulationSettings_.setSettings(s); //initialize forward simulation paramsReconstruction_.setWorldGrid(referenceSdf_->getGrid()); //update UI currentState_ = State::Reconstruction; paramsInspectScanInterface_->show(false); paramsReconstructionInterface_->show(true); } void ActionReconstructionApp::prevInspectScan() { //TODO } void ActionReconstructionApp::nextViewResult() { //TODO } void ActionReconstructionApp::prevReconstruction() { //TODO } void ActionReconstructionApp::cleanup() { if (worker_) { worker_->interrupt(); worker_->wait(); } gridSimulation_.reset(); dataLoader_.reset(); frameData_.reset(); referenceSdf_.reset(); cudaPrintfEnd(); App::quit(); } void ActionReconstructionApp::drawWireCube(const Eigen::Vector3d& ea, const Eigen::Vector3d& eb) { glm::vec3 a = utils::toGLM(ea); glm::vec3 b = utils::toGLM(eb); ci::gl::drawLine(a, glm::vec3(a.x, a.y, b.z)); ci::gl::drawLine(a, glm::vec3(a.x, b.y, a.z)); ci::gl::drawLine(a, glm::vec3(b.x, a.y, a.z)); ci::gl::drawLine(glm::vec3(a.x, b.y, b.z), glm::vec3(a.x, a.y, b.z)); ci::gl::drawLine(glm::vec3(b.x, a.y, b.z), glm::vec3(a.x, a.y, b.z)); ci::gl::drawLine(glm::vec3(a.x, b.y, b.z), glm::vec3(a.x, b.y, a.z)); ci::gl::drawLine(glm::vec3(b.x, b.y, a.z), glm::vec3(a.x, b.y, a.z)); ci::gl::drawLine(glm::vec3(b.x, b.y, a.z), glm::vec3(b.x, a.y, a.z)); ci::gl::drawLine(glm::vec3(b.x, a.y, b.z), glm::vec3(b.x, a.y, a.z)); ci::gl::drawLine(b, glm::vec3(a.x, b.y, b.z)); ci::gl::drawLine(b, glm::vec3(b.x, b.y, a.z)); ci::gl::drawLine(b, glm::vec3(b.x, a.y, b.z)); } void ActionReconstructionApp::drawWireCube(const ar3d::real3 & a, const ar3d::real3 & b) { drawWireCube(Eigen::Vector3d(a.x, a.y, a.z), Eigen::Vector3d(b.x, b.y, b.z)); } void ActionReconstructionApp::load() { cinder::fs::path initialPath = ""; cinder::fs::path path = getOpenFilePath(initialPath, std::vector<std::string>({ "json" })); if (path.empty()) { CI_LOG_I("loading cancelled"); return; } cinder::DataSourceRef source = cinder::loadFile(path); if (!source) { CI_LOG_E("Unable to load file " << path.string()); return; } cinder::JsonTree root; try { root = cinder::JsonTree(source); } catch (const cinder::JsonTree::ExcJsonParserError& ex) { CI_LOG_E("Unable to load json file " << path.string() << ": " << ex.what()); return; } paramsGeneral_.load(root.getChild("General")); paramsInspectScan_.load(root.getChild("InspectScan")); paramsReconstruction_.load(root.getChild("Reconstruction")); paramsViewResult_.load(root.getChild("ViewResult")); } void ActionReconstructionApp::save() { cinder::fs::path initialPath = ""; cinder::fs::path path = getSaveFilePath(initialPath, std::vector<std::string>({ "json" })); if (path.empty()) { CI_LOG_I("saving cancelled"); return; } path.replace_extension("json"); cinder::JsonTree root = cinder::JsonTree::makeObject(); { cinder::JsonTree child = cinder::JsonTree::makeObject("General"); paramsGeneral_.save(child); root.addChild(child); } { cinder::JsonTree child = cinder::JsonTree::makeObject("InspectScan"); paramsInspectScan_.save(child); root.addChild(child); } { cinder::JsonTree child = cinder::JsonTree::makeObject("Reconstruction"); paramsReconstruction_.save(child); root.addChild(child); } { cinder::JsonTree child = cinder::JsonTree::makeObject("ViewResult"); paramsViewResult_.save(child); root.addChild(child); } 
root.write(path); CI_LOG_I("Saved to " << path.string()); } #if 1 CINDER_APP( ActionReconstructionApp, RendererGl(RendererGl::Options().msaa(4)), [&](App::Settings *settings) { settings->setWindowSize(1600, 900); } ) #endif
34d47c6e252d0c28cdba99c46897174037399f34.cu
#include "ActionReconstructionApp.h" #include <cmath> #include <chrono> #include <thread> #include <time.h> #include <cinder/app/App.h> #include <cinder/app/RendererGl.h> #include <cinder/gl/gl.h> #include <cinder/CameraUi.h> #include <cinder/params/Params.h> #include <cinder/Log.h> #include <cinder/ObjLoader.h> #include <Utils.h> #include <InputConfig.h> #include <DataCamera.h> #include <InputDataLoader.h> #include <BackgroundWorker.h> #include <MeshReconstruction.h> #include <TransferFunctionEditor.h> #include <VolumeVisualization.h> #include <CoordinateTransformation.h> #include <cuPrintf.cuh> #include <tinyformat.h> #include <ObjectLoader.h> #include <GroundTruthToSdf.h> #include "resources/Resources.h" #include "Parameters.h" using namespace ci::app; using namespace std; using namespace ar; ActionReconstructionApp::ActionReconstructionApp() : currentState_(State::InspectScan) { worker_ = std::make_unique<ar3d::BackgroundWorker2>(); std::time_t result = std::time(nullptr); ci::log::makeLogger<ci::log::LoggerFile>(tinyformat::format("%sMainApp%s.log", getAppPath().string(), std::asctime(std::localtime(&result))), true); } void ActionReconstructionApp::setup() { CUMAT_SAFE_CALL(cudaPrintfInit()); //parameter ui, must happen before user-camera paramsGeneralInterface_ = ci::params::InterfaceGl::create(getWindow(), "General", toPixels(ci::ivec2(300, 250))); paramsInspectScanInterface_ = ci::params::InterfaceGl::create(getWindow(), "Inspect Scan", toPixels(ci::ivec2(300, 580))); paramsReconstructionInterface_ = ci::params::InterfaceGl::create(getWindow(), "Reconstruction", toPixels(ci::ivec2(300, 580))); paramsViewResultInterface_ = ci::params::InterfaceGl::create(getWindow(), "View Result", toPixels(ci::ivec2(300, 580))); paramsReconstructionInterface_->show(false); paramsViewResultInterface_->show(false); //transfer function editor tfe_ = std::make_unique<TransferFunctionEditor>(getWindow()); //volume visualization volumeVis_ = std::make_unique<ar3d::VolumeVisualization>(getWindow(), &camera_, &paramsGeneral_.volumeVisParams_); //user-camera camUi_ = ci::CameraUi(&camera_, getWindow()); ci::vec3 newEye = camera_.getEyePoint() + camera_.getViewDirection() * (camera_.getPivotDistance() * (1 - 0.2f)); camera_.setEyePoint(newEye); camera_.setPivotDistance(camera_.getPivotDistance() * 0.2f); //floor auto plane = cinder::geom::Plane().size(ci::vec2(4, 4)).subdivisions(ci::ivec2(5, 5)); vector<ci::gl::VboMesh::Layout> bufferLayout = { ci::gl::VboMesh::Layout().usage(GL_DYNAMIC_DRAW).attrib(cinder::geom::Attrib::POSITION, 3), ci::gl::VboMesh::Layout().usage(GL_STATIC_DRAW).attrib(cinder::geom::Attrib::TEX_COORD_0, 2) }; floorVboMesh_ = ci::gl::VboMesh::create(plane, bufferLayout); ci::gl::Texture::Format floorTextureFmt; floorTextureFmt.enableMipmapping(true); floorTextureFmt.setMinFilter(GL_LINEAR_MIPMAP_LINEAR); floorTexture_ = ci::gl::Texture::create(loadImage(loadResource(CHECKERBOARD_IMAGE)), floorTextureFmt); //ground truth shader groundTruthShader_ = ci::gl::GlslProg::create(ci::gl::GlslProg::Format() .vertex(R"GLSL( #version 150 uniform mat4 ciModelViewProjection; in vec4 ciPosition; in vec4 ciColor; out vec4 Color; void main(void) { gl_Position = ciModelViewProjection * ciPosition; Color = ciColor; } )GLSL") .fragment(R"GLSL( #version 150 in vec4 Color; out vec4 oColor; void main(void) { oColor = Color; } )GLSL" )); setupGeneral(); setupInspectScan(); setupReconstruction(); setupViewResult(); } void ActionReconstructionApp::setupGeneral() { paramsGeneralInterface_->addButton("Load", 
[this]() {this->load(); }); paramsGeneralInterface_->addButton("Save", [this]() {this->save(); }); paramsGeneralInterface_->addParam("Rendering-SaveFrameNames", &frameNames_).label("Frame Names"); paramsGeneralInterface_->addParam("Rendering-ExportWithNormals", &exportWithNormals_).group("Rendering").label("Export \\w normals + orig.pos."); paramsGeneral_.addParams(paramsGeneralInterface_); paramsGeneralInterface_->setPosition(glm::ivec2(5, 590)); } void ActionReconstructionApp::setupInspectScan() { paramsInspectScan_.addParams(paramsInspectScanInterface_); paramsInspectScanInterface_->addButton("InspectUseGroundTruth", [this]() {this->inspectScanUseGroundTruth(); }, "label='Use Ground Truth'"); paramsInspectScanInterface_->addButton("Next-1-2", [this]() {this->nextReconstruction(); }, "label='Next - Reconstruction'"); paramsInspectScanInterface_->setPosition(glm::ivec2(5, 5)); } void ActionReconstructionApp::setupReconstruction() { visualizePartialObservations_ = false; paramsReconstruction_.addParams(paramsReconstructionInterface_); paramsReconstructionInterface_->addButton("RecForwardStep", [this]() {this->reconstructionForwardStep(); }, "label='Forward Step' "); paramsReconstructionInterface_->addButton("RecReset", [this]() {this->reconstructionReset(); }, "label='Reset' key=r "); paramsReconstructionInterface_->addButton("RecSolve", [this]() {this->reconstructionSolve(); }, "label='Solve' key=Return "); paramsReconstructionInterface_->addButton("RecTest", [this]() {this->reconstructionTestGradient(); }, "label='Test Gradient' "); paramsReconstructionInterface_->addParam("RecPartObsVis", &visualizePartialObservations_).label("Visualize Observations"); paramsReconstructionInterface_->addButton("Prev-2-1", [this]() {this->prevInspectScan(); }, "label='Prev - Inspect Scan'"); paramsReconstructionInterface_->addButton("Next-2-3", [this]() {this->nextViewResult(); }, "label='Next - View Result'"); paramsReconstructionInterface_->setPosition(glm::ivec2(5, 5)); //plots typedef ar3d::AdjointSolver::InputVariables V; typedef ar3d::SoftBodySimulation3D::Settings S; costPlot = std::make_unique<GraphPlot>("Cost"); costPlot->setTrueValue(0); //plots.emplace_back(std::make_unique<GraphPlot>("GravityX"), [](const S& var) {return var.gravity_.x; }, [](const V& var) {return var.optimizeGravity_; }); plots.emplace_back(std::make_unique<GraphPlot>("GravityY"), [](const S& var) {return var.gravity_.y; }, [](const V& var) {return var.optimizeGravity_; }); //plots.emplace_back(std::make_unique<GraphPlot>("GravityZ"), [](const S& var) {return var.gravity_.z; }, [](const V& var) {return var.optimizeGravity_; }); plots.emplace_back(std::make_unique<GraphPlot>("Young's Modulus"), [](const S& var) {return var.youngsModulus_; }, [](const V& var) {return var.optimizeYoungsModulus_; }); plots.emplace_back(std::make_unique<GraphPlot>("Poisson Ratio"), [](const S& var) {return var.poissonsRatio_; }, [](const V& var) {return var.optimizePoissonRatio_; }); plots.emplace_back(std::make_unique<GraphPlot>("Mass"), [](const S& var) {return var.mass_; }, [](const V& var) {return var.optimizeMass_; }); plots.emplace_back(std::make_unique<GraphPlot>("Mass Damping"), [](const S& var) {return var.dampingAlpha_; }, [](const V& var) {return var.optimizeMassDamping_; }); plots.emplace_back(std::make_unique<GraphPlot>("Stiffness Damping"), [](const S& var) {return var.dampingBeta_; }, [](const V& var) {return var.optimizeStiffnessDamping_; }); plots.emplace_back(std::make_unique<GraphPlot>("LinearVelocityX"), [](const S& var) 
{return var.initialLinearVelocity_.x; }, [](const V& var) {return var.optimizeInitialLinearVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("LinearVelocityY"), [](const S& var) {return var.initialLinearVelocity_.y; }, [](const V& var) {return var.optimizeInitialLinearVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("LinearVelocityZ"), [](const S& var) {return var.initialLinearVelocity_.z; }, [](const V& var) {return var.optimizeInitialLinearVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("AngularVelocityX"), [](const S& var) {return var.initialAngularVelocity_.x; }, [](const V& var) {return var.optimizeInitialAngularVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("AngularVelocityY"), [](const S& var) {return var.initialAngularVelocity_.y; }, [](const V& var) {return var.optimizeInitialAngularVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("AngularVelocityZ"), [](const S& var) {return var.initialAngularVelocity_.z; }, [](const V& var) {return var.optimizeInitialAngularVelocity_; }); plots.emplace_back(std::make_unique<GraphPlot>("Ground Plane Theta"), [](const S& var) {return ar3d::CoordinateTransformation::cartesian2spherical(var.groundPlane_).y; }, [](const V& var) {return var.optimizeGroundPlane_; }); plots.emplace_back(std::make_unique<GraphPlot>("Ground Plane Phi"), [](const S& var) {return ar3d::CoordinateTransformation::cartesian2spherical(var.groundPlane_).z; }, [](const V& var) {return var.optimizeGroundPlane_; }); plots.emplace_back(std::make_unique<GraphPlot>("Ground Plane Height"), [](const S& var) {return var.groundPlane_.w; }, [](const V& var) {return var.optimizeGroundPlane_; }); } void ActionReconstructionApp::setupViewResult() { paramsViewResult_.addParams(paramsViewResultInterface_); paramsViewResultInterface_->addButton("Prev-3-2", [this]() {this->prevReconstruction(); }, "label='Prev - Reconstruction'"); paramsViewResultInterface_->setPosition(glm::ivec2(5, 5)); } void ActionReconstructionApp::keyDown(KeyEvent event) { App::keyDown(event); if (event.getChar() == 'f') { // Toggle full screen when the user presses the 'f' key. setFullScreen(!isFullScreen()); } else if (event.getCode() == KeyEvent::KEY_ESCAPE) { // Exit full screen, or quit the application, when the user presses the ESC key. if (isFullScreen()) setFullScreen(false); else quit(); } else if (event.getChar() == 'p') { //Screenshot ci::Surface surface = copyWindowSurface(); //construct filename time_t now = time(NULL); struct tm tstruct; char buf[100]; localtime_s(&tstruct, &now); strftime(buf, sizeof(buf), "%d-%m-%Y_%H-%M-%S", &tstruct); string fileName = string("screenshot-") + string(buf) + ".png"; //write out ci::writeImage(fileName, surface); } else if (event.getChar() == 'l') { //reload resources, shaders, ... 
volumeVis_->reloadResources(); } else if (event.getCode() == KeyEvent::KEY_SPACE) { spaceBarPressed_ = true; } } void ActionReconstructionApp::keyUp(KeyEvent event) { App::keyUp(event); if (event.getCode() == KeyEvent::KEY_SPACE && spaceBarPressed_) { spaceBarPressed_ = false; } } void ActionReconstructionApp::mouseDown( MouseEvent event ) { App::mouseDown(event); } void ActionReconstructionApp::update() { //save animation from previous frame if (animationTakeScreenshot_ && animationRendered_ && !frameNames_.empty()) { //Screenshot cinder::Surface surface = copyWindowSurface(); string fileName = tinyformat::format("../screenshots/%s%05d.png", frameNames_.c_str(), frameCounter_); writeImage(fileName, surface); CI_LOG_I("screenshot saved to " << fileName); ////SDF file //if (gridSimulation_) { // string fileName = tinyformat::format("../screenshots/%s%05d.sdf", frameNames_.c_str(), frameCounter_); // volumeVis_->saveSdf(fileName); //} //Marching Cubes mesh file if (gridSimulation_) { string fileName = tinyformat::format("../screenshots/%s%05d.obj", frameNames_.c_str(), frameCounter_); volumeVis_->saveMCMesh(fileName); } //High Resolution mesh file if (gridSimulation_) { string fileName = tinyformat::format("../screenshots/%s_high%05d.obj", frameNames_.c_str(), frameCounter_); volumeVis_->saveHighResultMesh(fileName, exportWithNormals_, exportWithNormals_); } //done } if (animationTakeScreenshot_ && animationRendered_) { animationRendered_ = false; animationTakeScreenshot_ = false; } switch (currentState_) { case State::InspectScan: updateInspectScan(); break; case State::Reconstruction: updateReconstruction(); break; case State::ViewResult: updateViewResult(); break; } //update transfer function editor tfe_->setVisible(volumeVis_->needsTransferFunction()); tfe_->update(); } void ActionReconstructionApp::updateInspectScan() { if (!worker_->isDone()) return; int hasChanges = -1; static ar3d::InputConfigPtr oldConfigPtr = nullptr; static int oldFrame = -1; static ar3d::MeshReconstruction::Settings oldMeshReconstructionSettings; //check, if the input file has changed const ar3d::InputConfigPtr config = paramsInspectScan_.getConfig(); if (!config) return; if (oldConfigPtr != config) { oldConfigPtr = paramsInspectScan_.getConfig(); //create data loader dataLoader_ = std::make_shared<ar3d::InputDataLoader>(paramsInspectScan_.getConfig()); hasChanges = 0; //reset oldFrame = -1; memset(&oldMeshReconstructionSettings, 0, sizeof(ar3d::MeshReconstruction::Settings)); groundTruthVboMesh_ = nullptr; frameData_ = nullptr; meshReconstruction_ = nullptr; } //check if the frame has changed const int frame = paramsInspectScan_.frame_; if (oldFrame != frame) { oldFrame = paramsInspectScan_.frame_; groundTruthVboMesh_ = nullptr; hasChanges = 0; } //check if the reconstruction settings have changed const ar3d::MeshReconstruction::Settings settings = paramsInspectScan_.meshReconstructionSettings.getSettings(); if (memcmp(&oldMeshReconstructionSettings, &settings, sizeof(ar3d::MeshReconstruction::Settings)) != 0) { oldMeshReconstructionSettings = paramsInspectScan_.meshReconstructionSettings.getSettings(); if (hasChanges == -1) hasChanges = 0; } //load in a background thread if (hasChanges >= 0) { //declare background task std::function<void(ar3d::BackgroundWorker2*)> task = [this, hasChanges, frame, settings, config] (ar3d::BackgroundWorker2* worker) { if (hasChanges <= 0) { //These steps only depend on the frame worker->setStatus("Load color and depth images"); auto frameDataTmp = 
this->dataLoader_->loadFrame(frame, worker); if (worker->isInterrupted()) return; this->frameData_ = frameDataTmp; worker->setStatus("Initialize mesh reconstruction"); auto meshReconstructionTmp = make_shared<ar3d::MeshReconstruction>(config, this->frameData_); if (worker->isInterrupted()) return; this->meshReconstruction_ = meshReconstructionTmp; worker->setStatus("Segment images"); this->meshReconstruction_->runSegmentation(worker); if (worker->isInterrupted()) return; worker->setStatus("Find bounding box"); this->meshReconstruction_->runBoundingBoxExtraction(worker); if (worker->isInterrupted()) return; } if (hasChanges <= 1) { //These depend on MeshReconstruction::Settings this->meshReconstruction_->settings = settings; worker->setStatus("Initialize 3D reconstruction"); this->meshReconstruction_->runInitReconstruction(worker); if (worker->isInterrupted()) return; worker->setStatus("Optimize 3D reconstruction"); this->meshReconstruction_->runReconstruction(worker); if (worker->isInterrupted()) return; worker->setStatus("Recover full signed distance function"); this->meshReconstruction_->runSignedDistanceReconstruction(worker); if (worker->isInterrupted()) return; referenceSdf_ = meshReconstruction_->getSdfData(); } frameCounter_ = frame; animationTakeScreenshot_ = true; }; //start background worker worker_->launch(task); CI_LOG_I("Background worker started from section " << hasChanges); } } void ActionReconstructionApp::inspectScanUseGroundTruth() { if (!worker_->isDone()) return; if (!paramsInspectScan_.getConfig()->groundTruthMeshes && !paramsInspectScan_.getConfig()->groundTruth) { CI_LOG_W("input dataset does not contain ground truth data"); return; } std::function<void(ar3d::BackgroundWorker2*)> task = [this](ar3d::BackgroundWorker2* worker) { worker->setStatus("Convert ground truth to SDF"); int frame = paramsInspectScan_.frame_; int resolution = paramsInspectScan_.meshReconstructionSettings.getSettings().gridResolution; referenceSdf_ = ar3d::groundTruthToSdf(paramsInspectScan_.getConfig(), frame, resolution); }; worker_->launch(task); } void ActionReconstructionApp::updateReconstruction() { static ar3d::WorldGridRealDataPtr oldSdf = nullptr; static ar3d::SoftBodySimulation3D::InputSdfSettings oldInputSettings; static ar3d::SoftBodySimulation3D::Settings oldElasticitySettings; if (!worker_->isDone()) return; if (oldSdf != referenceSdf_ || oldInputSettings != paramsReconstruction_.inputSettings_.getSettings()) { //input changed oldSdf = referenceSdf_; oldInputSettings = paramsReconstruction_.inputSettings_.getSettings(); oldElasticitySettings = paramsReconstruction_.simulationSettings_.getSettings(); ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { gridSimulation_.reset(); worker->setStatus("Create Input"); ar3d::SoftBodyGrid3D::Input input = ar3d::SoftBodyGrid3D::createFromSdf( this->paramsReconstruction_.inputSettings_.getSettings(), this->referenceSdf_); this->gridSimulation_ = std::make_unique<ar3d::SoftBodyGrid3D>(input); this->gridSimulation_->setSettings(paramsReconstruction_.simulationSettings_.getSettings()); this->gridSimulation_->setRecordTimings(true); worker->setStatus("Prepare rendering"); this->volumeVis_->setInput(this->gridSimulation_->getInput()); //results->input_ = gridSimulation->getInput(); frameCounter_ = 0; frameData_ = nullptr; }; worker_->launch(task); } if (!worker_->isDone()) return; if (oldElasticitySettings != paramsReconstruction_.simulationSettings_.getSettings()) { //elasticity settings changed oldElasticitySettings =
paramsReconstruction_.simulationSettings_.getSettings(); ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { if (gridSimulation_) { gridSimulation_->setSettings(paramsReconstruction_.simulationSettings_.getSettings()); //results->settings_ = simulationSettings_.getSettings(); } }; worker_->launch(task); } if (spaceBarPressed_ && !animationTakeScreenshot_) //space bar pressed and current frame saved reconstructionForwardStep(); } void ActionReconstructionApp::reconstructionForwardStep() { if (worker_->isDone() && !animationTakeScreenshot_) { ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { gridSimulation_->solve(true, worker, true); volumeVis_->update(gridSimulation_->getState()); worker->setStatus("Prepare partial observation visualization"); frameData_ = dataLoader_->loadFrame(frameCounter_/(paramsReconstruction_.costIntermediateSteps_+1), worker); if (visualizePartialObservations_) { observationVis_.setObservation( paramsInspectScan_.getConfig()->cameras[0].camera, frameData_->cameraImages[0].depthMatrix, gridSimulation_->getInput(), gridSimulation_->getState()); } frameCounter_++; animationTakeScreenshot_ = true; animationRendered_ = false; }; worker_->launch(task); } } void ActionReconstructionApp::reconstructionResetPlots() { costPlot->clear(); for (const auto& e : plots) { std::get<0>(e)->clear(); } } void ActionReconstructionApp::reconstructionReset() { CI_LOG_I("Reset simulation"); //wait for the current task if (worker_) { worker_->interrupt(); worker_->wait(); } //reset simulation frameCounter_ = 0; frameData_ = nullptr; if (gridSimulation_) { gridSimulation_->reset(); volumeVis_->update(gridSimulation_->getState()); } //reset plots reconstructionResetPlots(); } void ActionReconstructionApp::reconstructionSolve() { if (worker_->isDone()) { //setup plots reconstructionResetPlots(); costPlot->setMaxPoints(paramsReconstruction_.adjointSettings_.getSettings().numIterations_+1); for (const auto& e : plots) { std::get<0>(e)->setMaxPoints(paramsReconstruction_.adjointSettings_.getSettings().numIterations_+1); std::get<0>(e)->setTrueValue(std::get<1>(e)(paramsReconstruction_.simulationSettings_.getSettings())); std::get<0>(e)->addPoint(std::get<1>(e)(paramsReconstruction_.simulationSettings_.getSettings())); } //declare worker ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { worker->setStatus("Solve: Prepare input and settings"); ar3d::real timestep = 1.0 / (paramsInspectScan_.getConfig()->framerate * (paramsReconstruction_.costIntermediateSteps_ + 1)); ar3d::SimulationResults3DPtr results = std::make_shared<ar3d::SimulationResults3D>(); results->input_ = gridSimulation_->getInput(); results->settings_ = gridSimulation_->getSettings(); results->settings_.timestep_ = timestep; worker->setStatus("Solve: Load observations"); //for now, use all cameras std::vector<ar3d::real> timestepWeights; ar3d::CostFunctionPartialObservations::Observations observations; observations.gpuEvaluate_ = true; observations.maxSdf_ = 5; observations.noise_ = 0; //not needed because we already have the observations const auto& inputConfig = paramsInspectScan_.getConfig(); observations.numCameras_ = inputConfig->cameras.size(); observations.cameras_.resize(inputConfig->cameras.size()); for (size_t i = 0; i < inputConfig->cameras.size(); ++i) { observations.cameras_[i] = inputConfig->cameras[i].camera; } int numSteps = paramsReconstruction_.costNumSteps_ == 0 ? 
inputConfig->duration-1 : paramsReconstruction_.costNumSteps_; for (int i = 1; i <= numSteps; ++i) { worker->setStatus(tinyformat::format("Solve: Load observation %d / %d", i, numSteps)); //add in-between frames without weight for (int j = 0; j < paramsReconstruction_.costIntermediateSteps_; ++j) { timestepWeights.push_back(0); observations.observations_.emplace_back(); } //load camera images and copy them to the GPU timestepWeights.push_back(1); ar3d::CostFunctionPartialObservations::Observation observation; observation.resize(inputConfig->cameras.size()); const auto dataFrame = dataLoader_->loadFrame(i, worker); for (size_t j = 0; j < inputConfig->cameras.size(); ++j) { Eigen::Matrix<ar3d::real, Eigen::Dynamic, Eigen::Dynamic> host = dataFrame->cameraImages[j].depthMatrix.cast<ar3d::real>().matrix(); observation[j] = ar3d::CostFunctionPartialObservations::Image::fromEigen(host); } observations.observations_.push_back(observation); } assert(timestepWeights.size() == observations.observations_.size()); assert(timestepWeights.size() > 0); ar3d::CostFunctionPtr costFunction = std::make_shared<ar3d::CostFunctionPartialObservations>(timestepWeights, observations); worker->setStatus("Solve: Create AdjointSolver"); ar3d::AdjointSolver::Settings adjointSettings = paramsReconstruction_.adjointSettings_.getSettings(); adjointSettings.variables_.currentGravity_ = results->settings_.gravity_; adjointSettings.variables_.currentGroundPlane_ = results->settings_.groundPlane_; adjointSettings.variables_.currentMassDamping_ = results->settings_.dampingAlpha_; adjointSettings.variables_.currentMass_ = results->settings_.mass_; adjointSettings.variables_.currentPoissonRatio_ = results->settings_.poissonsRatio_; adjointSettings.variables_.currentStiffnessDamping_ = results->settings_.dampingBeta_; adjointSettings.variables_.currentYoungsModulus_ = results->settings_.youngsModulus_; ar3d::AdjointSolver solver(results, adjointSettings, costFunction); worker->setStatus("Solve: Solve it!"); std::vector<ar3d::SoftBodySimulation3D::Settings> values; std::vector<ar3d::SoftBodySimulation3D::Settings> gradients; values.push_back(gridSimulation_->getSettings()); {ar3d::SoftBodySimulation3D::Settings initialGrad; memset(&initialGrad, 0, sizeof(ar3d::SoftBodySimulation3D::Settings)); gradients.push_back(initialGrad); } ar3d::AdjointSolver::Callback_t callback = [this, &values, &gradients](const ar3d::SoftBodySimulation3D::Settings& var, const ar3d::SoftBodySimulation3D::Settings& grad, ar3d::real cost) { CI_LOG_I(var); CI_LOG_I(ar3d::CoordinateTransformation::cartesian2spherical(glm::double3(var.groundPlane_.x, var.groundPlane_.y, var.groundPlane_.z))); if (costPlot->getNumPoints()==0) costPlot->addPoint(cost); costPlot->addPoint(cost); for (const auto& e : plots) std::get<0>(e)->addPoint(std::get<1>(e)(var)); values.push_back(var); gradients.push_back(grad); }; solver.solve(callback, worker); //Done! 
Print steps std::stringstream ss; ss << "\nCost "; for (const auto& e : plots) { if (std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) ss << std::get<0>(e)->getName() << " (gradient) "; } ss << std::endl; for (int i = 1; i < values.size(); ++i) { ss << std::fixed << std::setw(12) << std::setprecision(7) << costPlot->getPoint(i) << " "; for (const auto& e : plots) { if (std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) { ss << std::fixed << std::setw(12) << std::setprecision(7) << std::get<1>(e)(values[i-1]) << " (" << std::fixed << std::setw(12) << std::setprecision(7) << std::get<1>(e)(gradients[i]) << ") "; } } ss << std::endl; } CI_LOG_I(ss.str()); }; worker_->launch(task); } } void ActionReconstructionApp::reconstructionTestGradient() { if (worker_->isDone()) { ar3d::BackgroundWorker2::task task = [this](ar3d::BackgroundWorker2* worker) { worker->setStatus("Solve: Prepare input and settings"); ar3d::real timestep = 1.0 / (paramsInspectScan_.getConfig()->framerate * (paramsReconstruction_.costIntermediateSteps_ + 1)); ar3d::SimulationResults3DPtr results = std::make_shared<ar3d::SimulationResults3D>(); results->input_ = gridSimulation_->getInput(); results->settings_ = gridSimulation_->getSettings(); results->settings_.timestep_ = timestep; worker->setStatus("Solve: Load observations"); //for now, use all cameras std::vector<ar3d::real> timestepWeights; ar3d::CostFunctionPartialObservations::Observations observations; observations.noise_ = 0; //not needed because we already have the observations const auto& inputConfig = paramsInspectScan_.getConfig(); observations.numCameras_ = inputConfig->cameras.size(); observations.cameras_.resize(inputConfig->cameras.size()); for (size_t i = 0; i < inputConfig->cameras.size(); ++i) { observations.cameras_[i] = inputConfig->cameras[i].camera; } int numSteps = paramsReconstruction_.costNumSteps_ == 0 ? 
inputConfig->duration : paramsReconstruction_.costNumSteps_; for (int i = 0; i < numSteps; ++i) { worker->setStatus(tinyformat::format("Solve: Load observation %d / %d", (i + 1), numSteps)); //load camera images and copy them to the GPU timestepWeights.push_back(1); ar3d::CostFunctionPartialObservations::Observation observation; observation.resize(inputConfig->cameras.size()); const auto dataFrame = dataLoader_->loadFrame(i, worker); for (size_t j = 0; j < inputConfig->cameras.size(); ++j) { Eigen::Matrix<ar3d::real, Eigen::Dynamic, Eigen::Dynamic> host = dataFrame->cameraImages[j].depthMatrix.cast<ar3d::real>().matrix(); observation[j] = ar3d::CostFunctionPartialObservations::Image::fromEigen(host); } observations.observations_.push_back(observation); //add in-between frames without weight if (i < numSteps - 1) { for (int j = 0; j < paramsReconstruction_.costIntermediateSteps_; ++j) { timestepWeights.push_back(0); observations.observations_.emplace_back(); } } } assert(timestepWeights.size() == observations.observations_.size()); assert(timestepWeights.size() > 0); ar3d::CostFunctionPtr costFunction = std::make_shared<ar3d::CostFunctionPartialObservations>(timestepWeights, observations); worker->setStatus("Solve: Create AdjointSolver"); ar3d::AdjointSolver::Settings adjointSettings = paramsReconstruction_.adjointSettings_.getSettings(); adjointSettings.variables_.currentGravity_ = results->settings_.gravity_; adjointSettings.variables_.currentGroundPlane_ = results->settings_.groundPlane_; adjointSettings.variables_.currentMassDamping_ = results->settings_.dampingAlpha_; adjointSettings.variables_.currentMass_ = results->settings_.mass_; adjointSettings.variables_.currentPoissonRatio_ = results->settings_.poissonsRatio_; adjointSettings.variables_.currentStiffnessDamping_ = results->settings_.dampingBeta_; adjointSettings.variables_.currentYoungsModulus_ = results->settings_.youngsModulus_; ar3d::AdjointSolver solver(results, adjointSettings, costFunction); solver.testGradient(worker); }; worker_->launch(task); } } void ActionReconstructionApp::updateViewResult() { } void ActionReconstructionApp::draw() { using namespace ar::utils; if (paramsGeneral_.printMode_) cinder::gl::clear(cinder::Color(1, 1, 1)); else cinder::gl::clear(cinder::Color(0, 0, 0)); // WORLD SPACE ci::gl::enableDepthRead(); ci::gl::enableDepthWrite(); ci::gl::setMatrices(camera_); switch (currentState_) { case State::InspectScan: drawInspectScan(); break; case State::Reconstruction: drawReconstruction(); break; case State::ViewResult: drawViewResult(); break; } // WINDOW SPACE ci::gl::disableDepthRead(); ci::gl::disableDepthWrite(); ci::gl::setMatricesWindow(getWindowSize(), true); // Draw the background worker's status if (worker_ && !worker_->isDone()) { //draw waiting animation { ci::gl::ScopedModelMatrix scopedMatrix; ci::gl::ScopedColor scopedColor; ci::gl::translate(25, getWindowHeight() - 50); int step; double dummy; step = static_cast<int>(std::modf(getElapsedSeconds(), &dummy) * 8); for (int i = 0; i < 8; ++i) { float c = ((i + step)%8) / 7.0f; ci::gl::color(c, c, c); ci::gl::drawSolidRoundedRect(ci::Rectf(5, -2, 15, 2), 2); ci::gl::rotate(-2.0f * M_PI / 8.0f); } } //draw status cinder::gl::drawString(worker_->getStatus(), glm::vec2(50, getWindowHeight() - 50), paramsGeneral_.printMode_ ? 
cinder::ColorA(0, 0, 0) : cinder::ColorA(1, 1, 1)); } // Draw the interface paramsGeneralInterface_->draw(); paramsInspectScanInterface_->draw(); paramsReconstructionInterface_->draw(); paramsViewResultInterface_->draw(); tfe_->draw(); } void ActionReconstructionApp::drawGroundTruth(int frame) { if (frame >= paramsInspectScan_.getConfig()->duration) return; if (paramsInspectScan_.getConfig()->groundTruth) { //ground truth is a ball ci::gl::ScopedGlslProg glslScope(ci::gl::getStockShader(ci::gl::ShaderDef().texture())); ci::gl::ScopedTextureBind texScope(floorTexture_); ci::gl::ScopedModelMatrix scopedMatrix; const ar3d::InputGroundTruth& groundTruth = *(paramsInspectScan_.getConfig()->groundTruth); ci::gl::translate(ar::utils::toGLM(groundTruth.locations[frame])); ci::gl::rotate(ar::utils::toGLM(groundTruth.rotations[frame])); ci::gl::drawSphere(ci::vec3(0, 0, 0), groundTruth.radius, 16); } else if (paramsInspectScan_.getConfig()->groundTruthMeshes) { static int oldFrame = -1; if (oldFrame != frame) { oldFrame = frame; groundTruthVboMesh_ = nullptr; } //ground truth is a mesh if (!groundTruthVboMesh_) { //ObjLoader loader(loadFile(path + "/groundTruth/frame" + std::to_string(frame) + ".obj")); //groundTruthVboMesh = ci::gl::VboMesh::create(loader); ci::TriMeshRef triMesh = ObjectLoader::loadCustomObj(paramsInspectScan_.getConfig()->getPathToGroundTruth(frame)); groundTruthVboMesh_ = ci::gl::VboMesh::create(*triMesh); } ci::gl::ScopedGlslProg glslScope(groundTruthShader_); ci::gl::ScopedModelMatrix scopedMatrix; ci::gl::draw(groundTruthVboMesh_); } } void ActionReconstructionApp::drawProjCameraPoints(int frame) { if (frame >= paramsInspectScan_.getConfig()->duration) return; static int oldFrame = -1; if (oldFrame != frame) { oldFrame = frame; projCamPoints_ = this->meshReconstruction_->getProjectedPoints(paramsInspectScan_.getConfig(), dataLoader_->loadFrame(frame)); } ci::gl::ScopedColor col(1, 0, 1); //pointsBatch_->draw(); for (const ci::vec3& p : projCamPoints_) { ci::gl::drawCube(p, ci::vec3(0.001, 0.001, 0.001)); } } void ActionReconstructionApp::drawCameras(int frame) { if (frameData_ == nullptr && frame < paramsInspectScan_.getConfig()->duration && dataLoader_ != nullptr && (paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Color || paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Depth) && worker_->isDone()) { //load frame data ar3d::BackgroundWorker2::task task = [this, frame](ar3d::BackgroundWorker2* worker) { auto frameDataTmp = this->dataLoader_->loadFrame(frame, worker); if (worker->isInterrupted()) return; this->frameData_ = frameDataTmp; }; worker_->launch(task); } for (size_t i = 0; i < paramsInspectScan_.getConfig()->cameras.size(); ++i) { const ar3d::DataCamera& c = paramsInspectScan_.getConfig()->cameras[i].camera; //optional: show current frame if (frameData_ != nullptr) { ci::gl::ScopedModelMatrix scopedMatrix; ci::gl::multModelMatrix(c.invViewProjMatrix); //ci::gl::translate(0, 0, -1); ci::gl::translate(0, 0, paramsInspectScan_.getConfig()->viewCameraImageTranslation); ci::gl::scale(1, -1, 1); if (paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Color) { ci::gl::draw(frameData_->cameraImages[i].getColorTexture(), ci::Rectf(-1, -1, 1, 1)); } else if (paramsGeneral_.showCameraMode_ == ParamsGeneral::ShowCameraMode::Depth) { ci::gl::draw(frameData_->cameraImages[i].getDepthTexture(), ci::Rectf(-1, -1, 1, 1)); } } //control points ci::gl::ScopedColor scopedColor; ci::gl::color(0.8, 0.2, 0.2); double dist = 
paramsInspectScan_.getConfig()->viewCameraNearPlane; // = 0.0; //Near plane ci::gl::drawSphere(c.location, 0.03f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(0, 0, dist)), 0.02f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(1, 0, dist)), 0.02f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(0, 1, dist)), 0.02f, 16); ci::gl::drawSphere(c.getWorldCoordinates(glm::vec3(1, 1, dist)), 0.02f, 16); //lines ci::gl::color(0.8, 0.4, 0.4); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(0, 0, dist))); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(1, 0, dist))); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(0, 1, dist))); ci::gl::drawLine(c.location, c.getWorldCoordinates(glm::vec3(1, 1, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(0, 0, dist)), c.getWorldCoordinates(glm::vec3(1, 0, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(0, 0, dist)), c.getWorldCoordinates(glm::vec3(0, 1, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(1, 1, dist)), c.getWorldCoordinates(glm::vec3(1, 0, dist))); ci::gl::drawLine(c.getWorldCoordinates(glm::vec3(1, 1, dist)), c.getWorldCoordinates(glm::vec3(0, 1, dist))); } } void ActionReconstructionApp::drawInspectScan() { if (paramsInspectScan_.getConfig() == nullptr) return; //WORLD SPACE //if (/*showFloor*/ true) { // ci::gl::ScopedGlslProg glslScope(ci::gl::getStockShader(ci::gl::ShaderDef().texture())); // ci::gl::ScopedTextureBind texScope(floorTexture_); // ci::gl::draw(floorVboMesh_); //} if (paramsGeneral_.showGroundTruth_) { drawGroundTruth(paramsInspectScan_.frame_); } if (paramsGeneral_.showCameraMode_!=ParamsGeneral::ShowCameraMode::Off) { drawCameras(paramsInspectScan_.frame_); } if (paramsGeneral_.showBoundingBox_ && meshReconstruction_) { ci::gl::ScopedColor scopedColor; //draw the camera pyramids ci::gl::color(0.5, 0.5, 1.0); for (const ar::geom3d::Pyramid& p : meshReconstruction_->getMaskPyramids()) { for (const auto& v : p.edgeRays) { ci::gl::drawLine(ar::utils::toGLM(p.center), ar::utils::toGLM((v * 5 + p.center).eval())); } } if (meshReconstruction_->getBoundingBox().isValid()) { //draw the bounding box ci::gl::color(0.2, 0.2, 1.0); drawWireCube(meshReconstruction_->getBoundingBox().min, meshReconstruction_->getBoundingBox().max); } } if (meshReconstruction_) { ar3d::WorldGridDoubleDataPtr sdf = referenceSdf_;//meshReconstruction_->getSdfData(); if (sdf) { //visualizate the SDF ar3d::WorldGridPtr grid = meshReconstruction_->getWorldGrid(); ci::gl::Texture3dRef tex = sdf->getTexture(ar3d::WorldGridData<double>::DataSource::HOST); //update volume vis and render ar3d::SoftBodyGrid3D::Input input; input.grid_ = grid; input.referenceSdf_ = sdf; volumeVis_->setInput(input); volumeVis_->setTransferFunction(tfe_->getTexture(), tfe_->getRangeMin(), tfe_->getRangeMax()); volumeVis_->draw(); if (animationTakeScreenshot_) animationRendered_ = true; } } if (paramsGeneral_.viewPoints_) { drawProjCameraPoints(paramsInspectScan_.frame_); } } void ActionReconstructionApp::drawReconstruction() { if (!gridSimulation_) return; //WORLD SPACE //ground plane if (paramsReconstruction_.simulationSettings_.getSettings().enableCollision_) { cinder::gl::ScopedModelMatrix m; glm::vec3 ref(0, 1, 0); glm::vec3 n( paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.x, paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.y, paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.z); cinder::gl::rotate(acos(dot(ref, n)), cross(ref, n)); 
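// align the reference up-axis (0,1,0) with the estimated ground-plane normal n (rotation above), then offset along that rotated axis by the plane height groundPlane_.w (translation below) before drawing the textured floor quad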
cinder::gl::translate(0, paramsReconstruction_.simulationSettings_.getSettings().groundPlane_.w, 0); cinder::gl::ScopedGlslProg glslScope(cinder::gl::getStockShader(cinder::gl::ShaderDef().texture())); cinder::gl::ScopedTextureBind texScope(floorTexture_); cinder::gl::draw(floorVboMesh_); } //dirichlet boundaries { cinder::gl::ScopedColor c; cinder::gl::color(0, 0, 1, 1); if (paramsReconstruction_.inputSettings_.getSettings().enableDirichlet) { drawWireCube(paramsReconstruction_.inputSettings_.getSettings().centerDirichlet - paramsReconstruction_.inputSettings_.getSettings().halfsizeDirichlet, paramsReconstruction_.inputSettings_.getSettings().centerDirichlet + paramsReconstruction_.inputSettings_.getSettings().halfsizeDirichlet); } } //reference grid bounds if (paramsGeneral_.showBoundingBox_) { ar3d::WorldGridPtr grid = gridSimulation_->getState().advectedSDF_ != nullptr ? gridSimulation_->getState().advectedSDF_->getGrid() : gridSimulation_->getInput().grid_; cinder::gl::ScopedColor scopedColor; cinder::gl::color(0.2, 1.0, 0.2); Eigen::Vector3d voxelSize(grid->getVoxelSize(), grid->getVoxelSize(), grid->getVoxelSize()); Eigen::Vector3d minCorner = (grid->getOffset()).cast<double>().array() * voxelSize.array(); Eigen::Vector3d maxCorner = (grid->getOffset() + grid->getSize() + Eigen::Vector3i(1, 1, 1)).cast<double>().array() * voxelSize.array(); drawWireCube(minCorner, maxCorner); } //advected grid bounds if (paramsGeneral_.showBoundingBox_) { cinder::gl::ScopedColor scopedColor; cinder::gl::color(1.0, 0.2, 0.2); const Eigen::Vector3d& minCorner = gridSimulation_->getState().advectedBoundingBox_.min; const Eigen::Vector3d& maxCorner = gridSimulation_->getState().advectedBoundingBox_.max; drawWireCube(minCorner, maxCorner); } if (paramsGeneral_.showGroundTruth_) { drawGroundTruth(frameCounter_); } if (paramsGeneral_.showCameraMode_ != ParamsGeneral::ShowCameraMode::Off) { drawCameras(frameCounter_); } //partial observation vis if (visualizePartialObservations_) { observationVis_.draw(); } //main vis volumeVis_->setTransferFunction(tfe_->getTexture(), tfe_->getRangeMin(), tfe_->getRangeMax()); volumeVis_->draw(); if (animationTakeScreenshot_) animationRendered_ = true; //points if (paramsGeneral_.viewPoints_) { drawProjCameraPoints(frameCounter_/(paramsReconstruction_.costIntermediateSteps_+1)); } // WINDOW SPACE cinder::gl::disableDepthRead(); cinder::gl::disableDepthWrite(); cinder::gl::setMatricesWindow(getWindowSize(), true); //draw plots { const int offset = 10; const int width = 0.2 * getWindowWidth(); int numPlots = 1; for (const auto& e : plots) if (std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) numPlots++; numPlots = std::max(4, numPlots); const int height = getWindowHeight() / numPlots; costPlot->setBoundingRect(cinder::Rectf(getWindowWidth() - width, offset, getWindowWidth() - offset, height - offset)); costPlot->setPrintMode(paramsGeneral_.printMode_); costPlot->draw(); int y = 1; for (const auto& e : plots) { if (!std::get<2>(e)(paramsReconstruction_.adjointSettings_.getSettings().variables_)) continue; std::get<0>(e)->setBoundingRect(cinder::Rectf(getWindowWidth() - width, y * height + offset, getWindowWidth() - offset, (y + 1) * height - offset)); std::get<0>(e)->setPrintMode(paramsGeneral_.printMode_); std::get<0>(e)->draw(); y++; } } } void ActionReconstructionApp::drawViewResult() { } void ActionReconstructionApp::nextReconstruction() { //check if we are ready to continue if (!worker_->isDone() || !meshReconstruction_) { CI_LOG_E("Input 
Reconstruction not completed yet"); return; } //pass the reference SDF to the next stage auto s = paramsReconstruction_.simulationSettings_.getSettings(); s.timestep_ = 1.0 / paramsInspectScan_.getConfig()->framerate; paramsReconstruction_.simulationSettings_.setSettings(s); //initialize forward simulation paramsReconstruction_.setWorldGrid(referenceSdf_->getGrid()); //update UI currentState_ = State::Reconstruction; paramsInspectScanInterface_->show(false); paramsReconstructionInterface_->show(true); } void ActionReconstructionApp::prevInspectScan() { //TODO } void ActionReconstructionApp::nextViewResult() { //TODO } void ActionReconstructionApp::prevReconstruction() { //TODO } void ActionReconstructionApp::cleanup() { if (worker_) { worker_->interrupt(); worker_->wait(); } gridSimulation_.reset(); dataLoader_.reset(); frameData_.reset(); referenceSdf_.reset(); cudaPrintfEnd(); App::quit(); } void ActionReconstructionApp::drawWireCube(const Eigen::Vector3d& ea, const Eigen::Vector3d& eb) { glm::vec3 a = utils::toGLM(ea); glm::vec3 b = utils::toGLM(eb); ci::gl::drawLine(a, glm::vec3(a.x, a.y, b.z)); ci::gl::drawLine(a, glm::vec3(a.x, b.y, a.z)); ci::gl::drawLine(a, glm::vec3(b.x, a.y, a.z)); ci::gl::drawLine(glm::vec3(a.x, b.y, b.z), glm::vec3(a.x, a.y, b.z)); ci::gl::drawLine(glm::vec3(b.x, a.y, b.z), glm::vec3(a.x, a.y, b.z)); ci::gl::drawLine(glm::vec3(a.x, b.y, b.z), glm::vec3(a.x, b.y, a.z)); ci::gl::drawLine(glm::vec3(b.x, b.y, a.z), glm::vec3(a.x, b.y, a.z)); ci::gl::drawLine(glm::vec3(b.x, b.y, a.z), glm::vec3(b.x, a.y, a.z)); ci::gl::drawLine(glm::vec3(b.x, a.y, b.z), glm::vec3(b.x, a.y, a.z)); ci::gl::drawLine(b, glm::vec3(a.x, b.y, b.z)); ci::gl::drawLine(b, glm::vec3(b.x, b.y, a.z)); ci::gl::drawLine(b, glm::vec3(b.x, a.y, b.z)); } void ActionReconstructionApp::drawWireCube(const ar3d::real3 & a, const ar3d::real3 & b) { drawWireCube(Eigen::Vector3d(a.x, a.y, a.z), Eigen::Vector3d(b.x, b.y, b.z)); } void ActionReconstructionApp::load() { cinder::fs::path initialPath = ""; cinder::fs::path path = getOpenFilePath(initialPath, std::vector<std::string>({ "json" })); if (path.empty()) { CI_LOG_I("loading cancelled"); return; } cinder::DataSourceRef source = cinder::loadFile(path); if (!source) { CI_LOG_E("Unable to load file " << path.string()); return; } cinder::JsonTree root; try { root = cinder::JsonTree(source); } catch (const cinder::JsonTree::ExcJsonParserError& ex) { CI_LOG_E("Unable to load json file " << path.string() << ": " << ex.what()); return; } paramsGeneral_.load(root.getChild("General")); paramsInspectScan_.load(root.getChild("InspectScan")); paramsReconstruction_.load(root.getChild("Reconstruction")); paramsViewResult_.load(root.getChild("ViewResult")); } void ActionReconstructionApp::save() { cinder::fs::path initialPath = ""; cinder::fs::path path = getSaveFilePath(initialPath, std::vector<std::string>({ "json" })); if (path.empty()) { CI_LOG_I("saving cancelled"); return; } path.replace_extension("json"); cinder::JsonTree root = cinder::JsonTree::makeObject(); { cinder::JsonTree child = cinder::JsonTree::makeObject("General"); paramsGeneral_.save(child); root.addChild(child); } { cinder::JsonTree child = cinder::JsonTree::makeObject("InspectScan"); paramsInspectScan_.save(child); root.addChild(child); } { cinder::JsonTree child = cinder::JsonTree::makeObject("Reconstruction"); paramsReconstruction_.save(child); root.addChild(child); } { cinder::JsonTree child = cinder::JsonTree::makeObject("ViewResult"); paramsViewResult_.save(child); root.addChild(child); } 
root.write(path); CI_LOG_I("Saved to " << path.string()); } #if 1 CINDER_APP( ActionReconstructionApp, RendererGl(RendererGl::Options().msaa(4)), [&](App::Settings *settings) { settings->setWindowSize(1600, 900); } ) #endif
1ccf8078f11f552d29ab78c2de1a792779e4c91c.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensorMasked.cuh> #include <THH/THHTensor.hpp> #include <THH/generic/THHTensorMasked.hip> #include <THH/THHGenerateFloatType.h>
1ccf8078f11f552d29ab78c2de1a792779e4c91c.cu
#include <THC/THCTensorMasked.cuh> #include <THC/THCTensor.hpp> #include <THC/generic/THCTensorMasked.cu> #include <THC/THCGenerateFloatType.h>
65f4bcb08172664f79131bbe90bcb3b424b8bce5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <cmath> #include <iostream> #include <random/mvg.cuh> #include <random> #include "test_utils.h" // mvg.h takes in matrices that are colomn major (as in fortan) #define IDX2C(i, j, ld) (j * ld + i) namespace MLCommon { namespace Random { // helper kernels /// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters, // i think that is much better to use., more general) template <typename T> __global__ void En_KF_accumulate(const int nPoints, const int dim, const T *X, T *x) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int col = idx % dim; int row = idx / dim; if (col < dim && row < nPoints) raft::myAtomicAdd(x + col, X[idx]); } template <typename T> __global__ void En_KF_normalize(const int divider, const int dim, T *x) { int xi = threadIdx.x + blockDim.x * blockIdx.x; if (xi < dim) x[xi] = x[xi] / divider; } template <typename T> __global__ void En_KF_dif(const int nPoints, const int dim, const T *X, const T *x, T *X_diff) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int col = idx % dim; int row = idx / dim; if (col < dim && row < nPoints) X_diff[idx] = X[idx] - x[col]; } // for specialising tests enum Correlation : unsigned char { CORRELATED, // = 0 UNCORRELATED }; template <typename T> struct MVGInputs { T tolerance; typename MultiVarGaussian<T>::Decomposer method; Correlation corr; int dim, nPoints; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const MVGInputs<T> &dims) { return os; } template <typename T> class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> { protected: void SetUp() override { // getting params params = ::testing::TestWithParam<MVGInputs<T>>::GetParam(); dim = params.dim; nPoints = params.nPoints; method = params.method; corr = params.corr; tolerance = params.tolerance; CUBLAS_CHECK(hipblasCreate(&cublasH)); CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH)); CUDA_CHECK(hipStreamCreate(&stream)); // preparing to store stuff P = (T *)malloc(sizeof(T) * dim * dim); x = (T *)malloc(sizeof(T) * dim); X = (T *)malloc(sizeof(T) * dim * nPoints); CUDA_CHECK(hipMalloc((void **)&P_d, sizeof(T) * dim * dim)); CUDA_CHECK(hipMalloc((void **)&X_d, sizeof(T) * nPoints * dim)); CUDA_CHECK(hipMalloc((void **)&x_d, sizeof(T) * dim)); CUDA_CHECK(hipMalloc((void **)&Rand_cov, sizeof(T) * dim * dim)); CUDA_CHECK(hipMalloc((void **)&Rand_mean, sizeof(T) * dim)); // generating random mean and cov. srand(params.seed); for (int j = 0; j < dim; j++) x[j] = rand() % 100 + 5.0f; // for random Cov. 
martix std::default_random_engine generator(params.seed); std::uniform_real_distribution<T> distribution(0.0, 1.0); // P (developing a +ve definite symm matrix) for (int j = 0; j < dim; j++) { for (int i = 0; i < j + 1; i++) { T k = distribution(generator); if (corr == UNCORRELATED) k = 0.0; P[IDX2C(i, j, dim)] = k; P[IDX2C(j, i, dim)] = k; if (i == j) P[IDX2C(i, j, dim)] += dim; } } // porting inputs to gpu raft::update_device(P_d, P, dim * dim, stream); raft::update_device(x_d, x, dim, stream); // initilizing the mvg mvg = new MultiVarGaussian<T>(dim, method); size_t o = mvg->init(cublasH, cusolverH, stream); // give the workspace area to mvg CUDA_CHECK(hipMalloc((void **)&workspace_d, o)); mvg->set_workspace(workspace_d); // get gaussians in X_d | P_d is destroyed. mvg->give_gaussian(nPoints, P_d, X_d, x_d); // saving the mean of the randoms in Rand_mean //@todo can be swapped with a API that calculates mean CUDA_CHECK(hipMemset(Rand_mean, 0, dim * sizeof(T))); dim3 block = (64); dim3 grid = (raft::ceildiv(nPoints * dim, (int)block.x)); hipLaunchKernelGGL(( En_KF_accumulate), dim3(grid), dim3(block), 0, 0, nPoints, dim, X_d, Rand_mean); CUDA_CHECK(hipPeekAtLastError()); grid = (raft::ceildiv(dim, (int)block.x)); hipLaunchKernelGGL(( En_KF_normalize), dim3(grid), dim3(block), 0, 0, nPoints, dim, Rand_mean); CUDA_CHECK(hipPeekAtLastError()); // storing the error wrt random point mean in X_d grid = (raft::ceildiv(dim * nPoints, (int)block.x)); hipLaunchKernelGGL(( En_KF_dif), dim3(grid), dim3(block), 0, 0, nPoints, dim, X_d, Rand_mean, X_d); CUDA_CHECK(hipPeekAtLastError()); // finding the cov matrix, placing in Rand_cov T alfa = 1.0 / (nPoints - 1), beta = 0.0; hipblasHandle_t handle; CUBLAS_CHECK(hipblasCreate(&handle)); CUBLAS_CHECK(raft::linalg::cublasgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, dim, dim, nPoints, &alfa, X_d, dim, X_d, dim, &beta, Rand_cov, dim, stream)); // restoring cov provided into P_d raft::update_device(P_d, P, dim * dim, stream); } void TearDown() override { // freeing mallocs CUDA_CHECK(hipFree(P_d)); CUDA_CHECK(hipFree(X_d)); CUDA_CHECK(hipFree(workspace_d)); free(P); free(x); free(X); // deleting mvg mvg->deinit(); delete mvg; CUBLAS_CHECK(hipblasDestroy(cublasH)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: MVGInputs<T> params; T *P, *x, *X, *workspace_d, *P_d, *x_d, *X_d; int dim, nPoints; typename MultiVarGaussian<T>::Decomposer method; Correlation corr; MultiVarGaussian<T> *mvg = NULL; T *Rand_cov, *Rand_mean, tolerance; hipblasHandle_t cublasH; hipsolverDnHandle_t cusolverH; hipStream_t stream; }; // end of MVGTest class ///@todo find out the reason that Un-correlated covs are giving problems (in qr) // Declare your inputs const std::vector<MVGInputs<float>> inputsf = { {0.3f, MultiVarGaussian<float>::Decomposer::chol_decomp, Correlation::CORRELATED, 5, 30000, 6ULL}, {0.1f, MultiVarGaussian<float>::Decomposer::chol_decomp, Correlation::UNCORRELATED, 5, 30000, 6ULL}, {0.25f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::CORRELATED, 5, 30000, 6ULL}, {0.1f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::UNCORRELATED, 5, 30000, 6ULL}, {0.2f, MultiVarGaussian<float>::Decomposer::qr, Correlation::CORRELATED, 5, 30000, 6ULL}, // { 0.2f, MultiVarGaussian<float>::Decomposer::qr, // Correlation::UNCORRELATED, 5, 30000, 6ULL} }; const std::vector<MVGInputs<double>> inputsd = { {0.25, MultiVarGaussian<double>::Decomposer::chol_decomp, Correlation::CORRELATED, 10, 3000000, 6ULL}, {0.1, 
MultiVarGaussian<double>::Decomposer::chol_decomp, Correlation::UNCORRELATED, 10, 3000000, 6ULL}, {0.25, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::CORRELATED, 10, 3000000, 6ULL}, {0.1, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::UNCORRELATED, 10, 3000000, 6ULL}, {0.2, MultiVarGaussian<double>::Decomposer::qr, Correlation::CORRELATED, 10, 3000000, 6ULL}, // { 0.2, MultiVarGaussian<double>::Decomposer::qr, // Correlation::UNCORRELATED, 10, 3000000, 6ULL} }; // make the tests typedef MVGTest<float> MVGTestF; typedef MVGTest<double> MVGTestD; TEST_P(MVGTestF, MeanIsCorrectF) { EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<float>(tolerance))) << " in MeanIsCorrect"; } TEST_P(MVGTestF, CovIsCorrectF) { EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<float>(tolerance))) << " in CovIsCorrect"; } TEST_P(MVGTestD, MeanIsCorrectD) { EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<double>(tolerance))) << " in MeanIsCorrect"; } TEST_P(MVGTestD, CovIsCorrectD) { EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<double>(tolerance))) << " in CovIsCorrect"; } // call the tests INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd)); }; // end of namespace Random }; // end of namespace MLCommon
65f4bcb08172664f79131bbe90bcb3b424b8bce5.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <cmath> #include <iostream> #include <random/mvg.cuh> #include <random> #include "test_utils.h" // mvg.h takes in matrices that are colomn major (as in fortan) #define IDX2C(i, j, ld) (j * ld + i) namespace MLCommon { namespace Random { // helper kernels /// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters, // i think that is much better to use., more general) template <typename T> __global__ void En_KF_accumulate(const int nPoints, const int dim, const T *X, T *x) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int col = idx % dim; int row = idx / dim; if (col < dim && row < nPoints) raft::myAtomicAdd(x + col, X[idx]); } template <typename T> __global__ void En_KF_normalize(const int divider, const int dim, T *x) { int xi = threadIdx.x + blockDim.x * blockIdx.x; if (xi < dim) x[xi] = x[xi] / divider; } template <typename T> __global__ void En_KF_dif(const int nPoints, const int dim, const T *X, const T *x, T *X_diff) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int col = idx % dim; int row = idx / dim; if (col < dim && row < nPoints) X_diff[idx] = X[idx] - x[col]; } // for specialising tests enum Correlation : unsigned char { CORRELATED, // = 0 UNCORRELATED }; template <typename T> struct MVGInputs { T tolerance; typename MultiVarGaussian<T>::Decomposer method; Correlation corr; int dim, nPoints; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const MVGInputs<T> &dims) { return os; } template <typename T> class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> { protected: void SetUp() override { // getting params params = ::testing::TestWithParam<MVGInputs<T>>::GetParam(); dim = params.dim; nPoints = params.nPoints; method = params.method; corr = params.corr; tolerance = params.tolerance; CUBLAS_CHECK(cublasCreate(&cublasH)); CUSOLVER_CHECK(cusolverDnCreate(&cusolverH)); CUDA_CHECK(cudaStreamCreate(&stream)); // preparing to store stuff P = (T *)malloc(sizeof(T) * dim * dim); x = (T *)malloc(sizeof(T) * dim); X = (T *)malloc(sizeof(T) * dim * nPoints); CUDA_CHECK(cudaMalloc((void **)&P_d, sizeof(T) * dim * dim)); CUDA_CHECK(cudaMalloc((void **)&X_d, sizeof(T) * nPoints * dim)); CUDA_CHECK(cudaMalloc((void **)&x_d, sizeof(T) * dim)); CUDA_CHECK(cudaMalloc((void **)&Rand_cov, sizeof(T) * dim * dim)); CUDA_CHECK(cudaMalloc((void **)&Rand_mean, sizeof(T) * dim)); // generating random mean and cov. srand(params.seed); for (int j = 0; j < dim; j++) x[j] = rand() % 100 + 5.0f; // for random Cov. 
martix std::default_random_engine generator(params.seed); std::uniform_real_distribution<T> distribution(0.0, 1.0); // P (developing a +ve definite symm matrix) for (int j = 0; j < dim; j++) { for (int i = 0; i < j + 1; i++) { T k = distribution(generator); if (corr == UNCORRELATED) k = 0.0; P[IDX2C(i, j, dim)] = k; P[IDX2C(j, i, dim)] = k; if (i == j) P[IDX2C(i, j, dim)] += dim; } } // porting inputs to gpu raft::update_device(P_d, P, dim * dim, stream); raft::update_device(x_d, x, dim, stream); // initilizing the mvg mvg = new MultiVarGaussian<T>(dim, method); size_t o = mvg->init(cublasH, cusolverH, stream); // give the workspace area to mvg CUDA_CHECK(cudaMalloc((void **)&workspace_d, o)); mvg->set_workspace(workspace_d); // get gaussians in X_d | P_d is destroyed. mvg->give_gaussian(nPoints, P_d, X_d, x_d); // saving the mean of the randoms in Rand_mean //@todo can be swapped with a API that calculates mean CUDA_CHECK(cudaMemset(Rand_mean, 0, dim * sizeof(T))); dim3 block = (64); dim3 grid = (raft::ceildiv(nPoints * dim, (int)block.x)); En_KF_accumulate<<<grid, block>>>(nPoints, dim, X_d, Rand_mean); CUDA_CHECK(cudaPeekAtLastError()); grid = (raft::ceildiv(dim, (int)block.x)); En_KF_normalize<<<grid, block>>>(nPoints, dim, Rand_mean); CUDA_CHECK(cudaPeekAtLastError()); // storing the error wrt random point mean in X_d grid = (raft::ceildiv(dim * nPoints, (int)block.x)); En_KF_dif<<<grid, block>>>(nPoints, dim, X_d, Rand_mean, X_d); CUDA_CHECK(cudaPeekAtLastError()); // finding the cov matrix, placing in Rand_cov T alfa = 1.0 / (nPoints - 1), beta = 0.0; cublasHandle_t handle; CUBLAS_CHECK(cublasCreate(&handle)); CUBLAS_CHECK(raft::linalg::cublasgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, dim, dim, nPoints, &alfa, X_d, dim, X_d, dim, &beta, Rand_cov, dim, stream)); // restoring cov provided into P_d raft::update_device(P_d, P, dim * dim, stream); } void TearDown() override { // freeing mallocs CUDA_CHECK(cudaFree(P_d)); CUDA_CHECK(cudaFree(X_d)); CUDA_CHECK(cudaFree(workspace_d)); free(P); free(x); free(X); // deleting mvg mvg->deinit(); delete mvg; CUBLAS_CHECK(cublasDestroy(cublasH)); CUSOLVER_CHECK(cusolverDnDestroy(cusolverH)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: MVGInputs<T> params; T *P, *x, *X, *workspace_d, *P_d, *x_d, *X_d; int dim, nPoints; typename MultiVarGaussian<T>::Decomposer method; Correlation corr; MultiVarGaussian<T> *mvg = NULL; T *Rand_cov, *Rand_mean, tolerance; cublasHandle_t cublasH; cusolverDnHandle_t cusolverH; cudaStream_t stream; }; // end of MVGTest class ///@todo find out the reason that Un-correlated covs are giving problems (in qr) // Declare your inputs const std::vector<MVGInputs<float>> inputsf = { {0.3f, MultiVarGaussian<float>::Decomposer::chol_decomp, Correlation::CORRELATED, 5, 30000, 6ULL}, {0.1f, MultiVarGaussian<float>::Decomposer::chol_decomp, Correlation::UNCORRELATED, 5, 30000, 6ULL}, {0.25f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::CORRELATED, 5, 30000, 6ULL}, {0.1f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::UNCORRELATED, 5, 30000, 6ULL}, {0.2f, MultiVarGaussian<float>::Decomposer::qr, Correlation::CORRELATED, 5, 30000, 6ULL}, // { 0.2f, MultiVarGaussian<float>::Decomposer::qr, // Correlation::UNCORRELATED, 5, 30000, 6ULL} }; const std::vector<MVGInputs<double>> inputsd = { {0.25, MultiVarGaussian<double>::Decomposer::chol_decomp, Correlation::CORRELATED, 10, 3000000, 6ULL}, {0.1, MultiVarGaussian<double>::Decomposer::chol_decomp, Correlation::UNCORRELATED, 10, 3000000, 6ULL}, {0.25, 
MultiVarGaussian<double>::Decomposer::jacobi, Correlation::CORRELATED, 10, 3000000, 6ULL}, {0.1, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::UNCORRELATED, 10, 3000000, 6ULL}, {0.2, MultiVarGaussian<double>::Decomposer::qr, Correlation::CORRELATED, 10, 3000000, 6ULL}, // { 0.2, MultiVarGaussian<double>::Decomposer::qr, // Correlation::UNCORRELATED, 10, 3000000, 6ULL} }; // make the tests typedef MVGTest<float> MVGTestF; typedef MVGTest<double> MVGTestD; TEST_P(MVGTestF, MeanIsCorrectF) { EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<float>(tolerance))) << " in MeanIsCorrect"; } TEST_P(MVGTestF, CovIsCorrectF) { EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<float>(tolerance))) << " in CovIsCorrect"; } TEST_P(MVGTestD, MeanIsCorrectD) { EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<double>(tolerance))) << " in MeanIsCorrect"; } TEST_P(MVGTestD, CovIsCorrectD) { EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<double>(tolerance))) << " in CovIsCorrect"; } // call the tests INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd)); }; // end of namespace Random }; // end of namespace MLCommon
35fce21279d36de672d934b1c51fcabca9451a1c.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thread> // NOLINT #include <vector> #include "gtest/gtest.h" #include "paddle/fluid/memory/allocation/best_fit_allocator.h" #include "paddle/fluid/memory/allocation/cuda_allocator.h" #include "paddle/fluid/memory/allocation/locked_allocator.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace memory { namespace allocation { struct ForEachFill { size_t* ptr_; explicit ForEachFill(size_t* ptr) : ptr_(ptr) {} __device__ void operator()(size_t i) { ptr_[i] = i; } }; TEST(BestFitAllocator, concurrent_cuda) { HIPAllocator allocator(platform::CUDAPlace(0)); // 256 MB auto cuda_allocation = allocator.Allocate(256U * 1024 * 1024, allocator.kDefault); LockedAllocator concurrent_allocator( std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get()))); auto th_main = [&] { std::random_device dev; std::default_random_engine engine(dev()); std::uniform_int_distribution<size_t> dist(1U, 1024U); platform::CUDAPlace gpu(0); platform::CUDADeviceContext dev_ctx(gpu); std::array<size_t, 1024> buf; for (size_t i = 0; i < 128; ++i) { size_t allocate_size = dist(engine); auto allocation = concurrent_allocator.Allocate( sizeof(size_t) * allocate_size, concurrent_allocator.kDefault); size_t* data = reinterpret_cast<size_t*>(allocation->ptr()); ForEachFill fill(data); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, allocate_size); for_range(fill); memory::Copy(platform::CPUPlace(), buf.data(), gpu, data, sizeof(size_t) * allocate_size, dev_ctx.stream()); dev_ctx.Wait(); for (size_t j = 0; j < allocate_size; ++j) { ASSERT_EQ(buf[j], j); } allocation = nullptr; } }; { std::vector<std::thread> threads; for (size_t i = 0; i < 1024; ++i) { threads.emplace_back(th_main); } for (auto& th : threads) { th.join(); } } } } // namespace allocation } // namespace memory } // namespace paddle
35fce21279d36de672d934b1c51fcabca9451a1c.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thread> // NOLINT #include <vector> #include "gtest/gtest.h" #include "paddle/fluid/memory/allocation/best_fit_allocator.h" #include "paddle/fluid/memory/allocation/cuda_allocator.h" #include "paddle/fluid/memory/allocation/locked_allocator.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace memory { namespace allocation { struct ForEachFill { size_t* ptr_; explicit ForEachFill(size_t* ptr) : ptr_(ptr) {} __device__ void operator()(size_t i) { ptr_[i] = i; } }; TEST(BestFitAllocator, concurrent_cuda) { CUDAAllocator allocator(platform::CUDAPlace(0)); // 256 MB auto cuda_allocation = allocator.Allocate(256U * 1024 * 1024, allocator.kDefault); LockedAllocator concurrent_allocator( std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get()))); auto th_main = [&] { std::random_device dev; std::default_random_engine engine(dev()); std::uniform_int_distribution<size_t> dist(1U, 1024U); platform::CUDAPlace gpu(0); platform::CUDADeviceContext dev_ctx(gpu); std::array<size_t, 1024> buf; for (size_t i = 0; i < 128; ++i) { size_t allocate_size = dist(engine); auto allocation = concurrent_allocator.Allocate( sizeof(size_t) * allocate_size, concurrent_allocator.kDefault); size_t* data = reinterpret_cast<size_t*>(allocation->ptr()); ForEachFill fill(data); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, allocate_size); for_range(fill); memory::Copy(platform::CPUPlace(), buf.data(), gpu, data, sizeof(size_t) * allocate_size, dev_ctx.stream()); dev_ctx.Wait(); for (size_t j = 0; j < allocate_size; ++j) { ASSERT_EQ(buf[j], j); } allocation = nullptr; } }; { std::vector<std::thread> threads; for (size_t i = 0; i < 1024; ++i) { threads.emplace_back(th_main); } for (auto& th : threads) { th.join(); } } } } // namespace allocation } // namespace memory } // namespace paddle
5bd5b832f4de413cde0719b65d2a99cf1d41063b.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/matrix/matvecmul.h" #include "cunumeric/matrix/matvecmul_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { using namespace Legion; template <> struct MatVecMulImplBody<VariantKind::GPU, LegateTypeCode::FLOAT_LT> { void operator()(size_t m, size_t n, float* lhs, const float* mat, const float* vec, size_t mat_stride, bool transpose_mat) { hipblasHandle_t cublas_handle = Core::get_cublas(); // Update the stream because the CUDA hijack can't see inside cuBLAS hipStream_t task_stream; hipStreamCreate(&task_stream); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const float alpha = 1.f; const float beta = 0.f; auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; CHECK_CUBLAS( hipblasSgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); hipStreamDestroy(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, LegateTypeCode::DOUBLE_LT> { void operator()(size_t m, size_t n, double* lhs, const double* mat, const double* vec, size_t mat_stride, bool transpose_mat) { hipblasHandle_t cublas_handle = Core::get_cublas(); // Update the stream because the CUDA hijack can't see inside cuBLAS hipStream_t task_stream; hipStreamCreate(&task_stream); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const double alpha = 1.f; const double beta = 0.f; auto trans = transpose_mat ? HIPBLAS_OP_N : HIPBLAS_OP_T; CHECK_CUBLAS( hipblasDgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); hipStreamDestroy(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, LegateTypeCode::HALF_LT> { void operator()(size_t m, size_t n, float* lhs, const __half* mat, const __half* vec, size_t mat_stride, bool transpose_mat) { hipblasHandle_t cublas_handle = Core::get_cublas(); // Update the stream because the CUDA hijack can't see inside cuBLAS hipStream_t task_stream; hipStreamCreate(&task_stream); CHECK_CUBLAS(hipblasSetStream(cublas_handle, task_stream)); const float alpha = 1.f; const float beta = 0.f; // Use SgemmEx here since there is no half precision gemv yet if (transpose_mat) { CHECK_CUBLAS(cublasSgemmEx(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, 1, m, &alpha, mat, HIP_R_16F, mat_stride, vec, HIP_R_16F, m, &beta, lhs, HIP_R_32F, n)); } else { CHECK_CUBLAS(cublasSgemmEx(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, 1, n, &alpha, mat, HIP_R_16F, mat_stride, vec, HIP_R_16F, n, &beta, lhs, HIP_R_32F, m)); } hipStreamDestroy(task_stream); } }; /*static*/ void MatVecMulTask::gpu_variant(TaskContext& context) { matvecmul_template<VariantKind::GPU>(context); } } // namespace cunumeric
5bd5b832f4de413cde0719b65d2a99cf1d41063b.cu
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/matrix/matvecmul.h" #include "cunumeric/matrix/matvecmul_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { using namespace Legion; template <> struct MatVecMulImplBody<VariantKind::GPU, LegateTypeCode::FLOAT_LT> { void operator()(size_t m, size_t n, float* lhs, const float* mat, const float* vec, size_t mat_stride, bool transpose_mat) { cublasHandle_t cublas_handle = Core::get_cublas(); // Update the stream because the CUDA hijack can't see inside cuBLAS cudaStream_t task_stream; cudaStreamCreate(&task_stream); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const float alpha = 1.f; const float beta = 0.f; auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; CHECK_CUBLAS( cublasSgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); cudaStreamDestroy(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, LegateTypeCode::DOUBLE_LT> { void operator()(size_t m, size_t n, double* lhs, const double* mat, const double* vec, size_t mat_stride, bool transpose_mat) { cublasHandle_t cublas_handle = Core::get_cublas(); // Update the stream because the CUDA hijack can't see inside cuBLAS cudaStream_t task_stream; cudaStreamCreate(&task_stream); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const double alpha = 1.f; const double beta = 0.f; auto trans = transpose_mat ? CUBLAS_OP_N : CUBLAS_OP_T; CHECK_CUBLAS( cublasDgemv(cublas_handle, trans, n, m, &alpha, mat, mat_stride, vec, 1, &beta, lhs, 1)); cudaStreamDestroy(task_stream); } }; template <> struct MatVecMulImplBody<VariantKind::GPU, LegateTypeCode::HALF_LT> { void operator()(size_t m, size_t n, float* lhs, const __half* mat, const __half* vec, size_t mat_stride, bool transpose_mat) { cublasHandle_t cublas_handle = Core::get_cublas(); // Update the stream because the CUDA hijack can't see inside cuBLAS cudaStream_t task_stream; cudaStreamCreate(&task_stream); CHECK_CUBLAS(cublasSetStream(cublas_handle, task_stream)); const float alpha = 1.f; const float beta = 0.f; // Use SgemmEx here since there is no half precision gemv yet if (transpose_mat) { CHECK_CUBLAS(cublasSgemmEx(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, 1, m, &alpha, mat, CUDA_R_16F, mat_stride, vec, CUDA_R_16F, m, &beta, lhs, CUDA_R_32F, n)); } else { CHECK_CUBLAS(cublasSgemmEx(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, m, 1, n, &alpha, mat, CUDA_R_16F, mat_stride, vec, CUDA_R_16F, n, &beta, lhs, CUDA_R_32F, m)); } cudaStreamDestroy(task_stream); } }; /*static*/ void MatVecMulTask::gpu_variant(TaskContext& context) { matvecmul_template<VariantKind::GPU>(context); } } // namespace cunumeric
193ed8a97eb4badfa8f3f93cd8eca9b948771c0a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011 University of Sheffield. * Author: Dr Paul Richmond * Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk) * * University of Sheffield retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * University of Sheffield is strictly prohibited. * * For terms of licence agreement please attached licence or view licence * on www.flamegpu.com website. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cmath> #include <GL/glew.h> #include <GL/freeglut.h> #include <cuda_gl_interop.h> #include "header.h" /* Error check function for safe CUDA API calling */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } extern void registerBO(cudaGraphicsResource_t* cudaResource, GLuint* bo) { gpuErrchk(hipGraphicsGLRegisterBuffer(cudaResource, *bo, hipGraphicsMapFlagsWriteDiscard)); } extern void unregisterBO(cudaGraphicsResource_t* cudaResource) { gpuErrchk(hipGraphicsUnregisterResource(*cudaResource)); }
193ed8a97eb4badfa8f3f93cd8eca9b948771c0a.cu
/* * Copyright 2011 University of Sheffield. * Author: Dr Paul Richmond * Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk) * * University of Sheffield retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * University of Sheffield is strictly prohibited. * * For terms of licence agreement please attached licence or view licence * on www.flamegpu.com website. * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cmath> #include <GL/glew.h> #include <GL/freeglut.h> #include <cuda_gl_interop.h> #include "header.h" /* Error check function for safe CUDA API calling */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } extern void registerBO(cudaGraphicsResource_t* cudaResource, GLuint* bo) { gpuErrchk(cudaGraphicsGLRegisterBuffer(cudaResource, *bo, cudaGraphicsMapFlagsWriteDiscard)); } extern void unregisterBO(cudaGraphicsResource_t* cudaResource) { gpuErrchk(cudaGraphicsUnregisterResource(*cudaResource)); }
956331a1d1264f54a54974ba9d9f52a17589e437.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; void SetKDeltaGpuData() { hipError_t status; status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "hipMemcpyToSymbol: SetKDeltaGpuData copy to cData failed"); } void GetKDeltaGpuData() { hipError_t status; status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "hipMemcpyFromSymbol: GetKDeltaGpuData copy From cData failed"); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhOutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateLinearOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearOutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUOutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUOutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateSoftMaxOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
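/* Byte-encoded targets are dequantized on the fly: unsigned char values are scaled by 1/256 and
   signed char values by 1/128, mapping the raw byte range into roughly [0,1) and (-1,1). */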
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
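/* The Leaky ReLU delta multiplies (a - t) by ((a > 0) + (a <= 0) * slope), i.e. the LReLU derivative
   written as branchless boolean arithmetic: 1 for positive activations, slope otherwise. */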
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
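/* For the ELU case the derivative factor is 1 when a >= 0 and (a + alpha) when a < 0, using the
   identity alpha * exp(x) = a + alpha for a = alpha * (exp(x) - 1). */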
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
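/* The SELU kernels above scale the error by lambda for non-negative activations and by
   lambda * alpha * exp(a) otherwise; note that the exponential is taken of the activation a itself. */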
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> void kCalculateIndexedOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateIndexedSigmoidOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidOutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateIndexedTanhOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedTanhOutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateIndexedLinearOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedLinearOutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateIndexedRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateIndexedLRELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, slope); LAUNCHERROR("kCalculateIndexedLRELUOutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateIndexedELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha); LAUNCHERROR("kCalculateIndexedELUOutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateIndexedSELUOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSELUOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateIndexedSoftMaxOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = 
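/* L2 hinge family: diff = a - |t| is clamped so that only margin violations contribute: for targets
   greater than zero only a shortfall (diff < 0) is kept, otherwise only an overshoot (diff > 0),
   before the activation derivative is applied. */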
(cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
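/* The Tanh variants multiply the clamped diff by (1 - a * a), the tanh derivative expressed through
   the activation value a. */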
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
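/* The ReLU variants gate the delta with (a > 0.0), so units that are inactive propagate no error. */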
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
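/* For Linear and SoftMax outputs the L2-hinge delta is the clamped diff itself; no activation
   derivative factor is applied. */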
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> void kCalculateL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidL2HingeOutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhL2HingeOutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateLinearL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUL2HingeOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateSoftMaxL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
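/* This block of Indexed*L2Hinge kernels mirrors the non-indexed L2-hinge kernels above, adding a
   pIndex parameter for indexed datasets. */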
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> void kCalculateIndexedL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateIndexedSigmoidL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidL2HingeOutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateIndexedTanhL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedTanhL2HingeOutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateIndexedLinearL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateIndexedRELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateIndexedLRELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, slope); LAUNCHERROR("kCalculateIndexedLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateIndexedELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha); LAUNCHERROR("kCalculateIndexedELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateIndexedSELUL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSELUL2HingeOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = pData[pos]; pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ?
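/* Hinge delta: each block handles one example and its threads stride across the row
   (pos += blockDim.x); the subgradient is -t wherever the output a is negative and 0 elsewhere. */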
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0); pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0); pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<typename T> void kCalculateHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock)); hipLaunchKernelGGL(( kCalculateHingeOutputDelta_kernel), dim3(batch), dim3(threads), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateHingeOutputDelta_kernel"); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = pData[pos]; pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0); pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0); pDelta[pos] = (a < (NNFloat)0.0) ? 
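/* The indexed hinge kernels resolve the data row through pIndex[...] (after optional shuffling)
   before applying the same rule; the host-side launcher below uses one block per example with
   between 32 and getGpu()._threadsPerBlock threads. */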
-t : (NNFloat)0.0;
        pos += blockDim.x;
    }
}

// Dispatches the indexed hinge output delta kernel, one block per example
template<typename T> void kCalculateIndexedHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData)
{
    unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock));
    hipLaunchKernelGGL(( kCalculateIndexedHingeOutputDelta_kernel), dim3(batch), dim3(threads), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData);
    LAUNCHERROR("kCalculateIndexedHingeOutputDelta_kernel");
}

// Raw pass for sparse sigmoid outputs: every unit is first treated as having a zero target
__global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
    uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (pos < size)
    {
        NNFloat w = cData._deltaBoost_zero;
        if (pSparseWeight != NULL)
        {
            uint64_t dpos = (pos / stride) + position;
            dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = pUnit[pos];
        pDelta[pos] = w * a * a * ((NNFloat)1.0 - a);
    }
}

// Non-zero pass: one warp per example rewrites the deltas at its sparse indices with a target of one
__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = w * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
            pos1 += cData._warpSize;
        }
    }
}

// Same two-pass scheme for tanh outputs
__global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
    uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (pos < size)
    {
        NNFloat w = (NNFloat)1.0;
        if (pSparseWeight != NULL)
        {
            uint64_t dpos = (pos / stride) + position;
            dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = pUnit[pos];
        pDelta[pos] = w * a * ((NNFloat)1.0 - a * a);
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = (pSparseWeight != NULL) ?
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1); pos1 += threadIdx.x & cData._warpMask; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = a - w; pos1 += cData._warpSize; } } } void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSigmoidOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, 
position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateSparseNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = a - w;
            pos1 += cData._warpSize;
        }
    }
}

// Indexed variant of kCalculateSparseOutputDelta: example rows are resolved through pIndex in the non-zero kernels
void kCalculateIndexedSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope);
                LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
            }
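            // Non-zero pass: rewrite the deltas at each example's sparse indices (rows resolved via pIndex)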
hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateIndexedSparseNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope); 
LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateSparseAnalogNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
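        // Loop below applies the raw error (a - t) scaled by the tanh derivative (1 - a * a),
        // evaluated from the stored activation rather than recomputing tanh.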
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t *pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
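        // For the linear output the delta is just w * (a - t). The unsigned char / char
        // specializations that follow rescale quantized targets by 1/256 and 1/128, which
        // appears to map raw bytes onto [0,1) and (-1,1) respectively.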
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
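        // ((a > 0) + (a <= 0) * slope) evaluates to 1 or slope, selecting the Leaky ReLU
        // derivative without a branch.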
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
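        // On the non-positive side the stored ELU activation is alpha * (exp(x) - 1), so
        // (a + alpha) reproduces the derivative alpha * exp(x) without another exp().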
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
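        // SELU branch below scales the error by lambda where a > 0 and by
        // lambda * alpha * exp(a) otherwise, mirroring the raw SELU delta kernels.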
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
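        // SoftMax outputs use the plain w * (a - t) form; the activation derivative is
        // presumably already folded into the cross-entropy style loss term.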
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
            pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
            pDelta[pos2] = w * (a - t);
            pos1 += cData._warpSize;
        }
    }
}

template<typename T>
void kCalculateIndexedSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawRELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLRELUOutputDelta_kernel), dim3(grid1),
dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
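        // L2 hinge with an implicit target of 1: diff = min(0, a - 1), so a listed output
        // stops producing gradient once its activation reaches 1.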
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
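        // Raw kernels recover the example row for the per-sample weight as
        // (pos / stride) + position, passed through the shuffle table when enabled.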
                cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = max((NNFloat)0.0, pUnit[pos]);
        pDelta[pos] = w * a;
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos];
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat diff = min((NNFloat)0.0, a - w);
            pDelta[pos2] = diff;
            pos1 += cData._warpSize;
        }
    }
}

void kCalculateSparseL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroSigmoidL2HingeOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawTanhL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroTanhL2HingeOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLinearL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroLinearL2HingeOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroRELUL2HingeOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope);
                LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroLRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope);
            LAUNCHERROR("kCalculateSparseNonZeroLRELUL2HingeOutputDelta_kernel");
            break;

        case ExponentialLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha);
                LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha);
            LAUNCHERROR("kCalculateSparseNonZeroELUL2HingeOutputDelta_kernel");
            break;

        case ScaledExponentialLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda);
                LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda);
            LAUNCHERROR("kCalculateSparseNonZeroSELUL2HingeOutputDelta_kernel");
            break;

        case SoftMax:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroSoftMaxL2HingeOutputDelta_kernel");
            break;
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ?
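        // Indexed variants only differ in translating the (optionally shuffled) batch
        // position through pIndex to the underlying dataset row before the weight lookup.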
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
            pSparseWeight[dpos] : (NNFloat)1.0;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0);
            pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
        uint64_t pos1 = pSparseStart[dpos];
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat diff = min((NNFloat)0.0, a - w);
            pDelta[pos2] = diff;
            pos1 += cData._warpSize;
        }
    }
}

void kCalculateIndexedSparseL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidL2HingeOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawTanhL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroTanhL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhL2HingeOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLinearL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLinearL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position,
batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateIndexedSparseNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateIndexedSparseNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* 
pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
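        // When cData._bShuffleIndices is set, dpos redirects this batch row through the shuffle index table.
        // One warp handles one example: pos1 starts at the thread's lane offset within the example's sparse
        // range and advances by the warp size each iteration.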
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
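        // w combines the delta boost for non-zero targets (cData._deltaBoost_one) with the optional
        // per-example sparse weight, which defaults to 1.0 when pSparseWeight is NULL.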
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
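            // The unsigned char / char specializations that follow rescale stored byte targets by
            // 1/256 and 1/128 respectively before applying the same hinge rule.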
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff);
            pDelta[pos2] = w * diff;
            pos1 += cData._warpSize;
        }
    }
}

template<typename T>
void kCalculateSparseAnalogL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawTanhL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLinearL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                hipLaunchKernelGGL(( kCalculateSparseRawLRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope);
                LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel");
            }
            hipLaunchKernelGGL((
kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
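            // The Indexed* kernels below are identical to the analog kernels above except that the
            // example row is first looked up through pIndex[] before reading its sparse range.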
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t *pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
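            // Leaky ReLU backprop factor: 1 when a > 0, slope otherwise; the ELU and SELU variants
            // below use (a + alpha) and lambda-scaled terms in the same position.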
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseAnalogL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, 
pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t); } } template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: case SoftMax: hipLaunchKernelGGL(( kCalculateSigmoidCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t); } } template<typename T> void kCalculateIndexedCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: case SoftMax: hipLaunchKernelGGL(( kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } void kCalculateIndexedSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel"); break; case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: hipLaunchKernelGGL(( kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._SMCE_zeroScale; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = w * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = w * (a - (NNFloat)1.0); pDelta[pos2] = output; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = cData._SMCE_zeroScale * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1)); uint64_t offset = pos * stride; pos1 += threadIdx.x & cData._warpMask; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = (a - w); pDelta[pos2] = output; pos1 += cData._warpSize; } } } void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ 
void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = w * (a - (NNFloat)1.0); pDelta[pos2] = output; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1); uint64_t offset = pos * stride; pos1 += threadIdx.x & cData._warpMask; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = cData._SMCE_oneScale * (a - w); pDelta[pos2] = output; pos1 += cData._warpSize; } } } void kCalculateIndexedSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); 
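// Note on the sparse two-pass update used throughout these dispatchers: the "Raw" kernel launched
// just above writes the zero-target delta for every output unit, and the indexed "NonZero" kernel
// launched next overwrites only the positions named in pSparseIndex with their true-target delta.
// When bSparseIgnoreZero is set, the raw pass is skipped entirely and the delta buffer has already
// been cleared with hipMemset earlier in this function. LAUNCHERROR is a macro defined elsewhere in
// this codebase and is presumed here to check the preceding asynchronous launch for errors.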
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) { output = cData._SMCE_zeroScale * a; } pDelta[pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) { output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0); } pDelta[pos2] = output; pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: cout << "unsupported activation for this cost function" << endl; getGpu().Shutdown(); exit(-1); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) { output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0); } pDelta[pos2] = output; pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: cout << "unsupported activation for this cost function" << endl; getGpu().Shutdown(); exit(-1); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a- t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateLinearL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearL1OutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData, alpha, lambda); 
LAUNCHERROR("kCalculateSELUL1OutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a- t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<typename T> void kCalculateIndexedL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateIndexedSigmoidL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidL1OutputDelta_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateIndexedTanhL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedTanhL1OutputDelta_kernel"); break; case Linear: hipLaunchKernelGGL(( kCalculateIndexedLinearL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedLinearL1OutputDelta_kernel"); break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateIndexedRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateIndexedLRELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, slope); LAUNCHERROR("kCalculateIndexedLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: hipLaunchKernelGGL(( kCalculateIndexedELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha); LAUNCHERROR("kCalculateIndexedELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateIndexedSELUL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a > (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroTanhL1OutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroLinearL1OutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel), 
dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateSparseNonZeroSELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a > (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } void kCalculateIndexedSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { hipMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawTanhL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLinearL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); 
LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawLRELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { hipLaunchKernelGGL(( kCalculateSparseRawSELUL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel"); } hipLaunchKernelGGL(( kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; // Calculate sum of activations if (pos < stride) { NNFloat pi = (NNFloat)0.0; for (int i = 0; i < batch; i++) { pi += pUnit[pos]; pos += stride; } // Calculate sparseness penalty pi /= (NNFloat)batch; pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi)); NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi)); // Apply sparseness penalty to deltas pos = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < batch; i++) { pDelta[pos] += penalty; pos += stride; } } } 
// Calculates and applies sparseness penalty to hidden layers void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { dim3 grid1(CalculateBlocks(stride)); hipLaunchKernelGGL(( kCalculateSparsenessPenalty_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, batch, stride, pUnit, pDelta, p, beta); LAUNCHERROR("kCalculateSparsenessPenalty_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; pDelta[pos] = x * ((NNFloat)1.0 - x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; x *= oneOverScale; pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] = (NNFloat)0.0; } } __global__ void LAUNCH_BOUNDS() kCalculateLRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) { pDelta[pos] *= slope; } } } __global__ void LAUNCH_BOUNDS() kCalculateELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] *= (x + alpha); } } __global__ void LAUNCH_BOUNDS() kCalculateSELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat delta = pDelta[pos]; if (x > (NNFloat)0.0) { delta *= lambda; } else { delta *= (x + lambda * alpha); } pDelta[pos] = delta; } } void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint32_t blocks = CalculateBlocks(size); NNFloat oneOverScale = (NNFloat)1.0 / scale; switch (activation) { case Sigmoid: hipLaunchKernelGGL(( kCalculateSigmoidHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel"); break; case Tanh: hipLaunchKernelGGL(( kCalculateTanhHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, scale, oneOverScale); LAUNCHERROR("kCalculateTanhHadamardProduct_kernel"); break; case Linear: // Derivative of linear output is 1, no need to call any kernel here break; case RectifiedLinear: hipLaunchKernelGGL(( kCalculateRELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta); LAUNCHERROR("kCalculateRELUHadamardProduct_kernel"); break; case LeakyRectifiedLinear: hipLaunchKernelGGL(( kCalculateLRELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateLRELUHadamardProduct_kernel"); break; case 
ExponentialLinear: hipLaunchKernelGGL(( kCalculateELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateELUHadamardProduct_kernel"); break; case ScaledExponentialLinear: hipLaunchKernelGGL(( kCalculateSELUHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSELUHadamardProduct_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Normalalize vector if too large if (r2 > norm * norm) { norm *= rsqrt(r2); pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t blocks = (batch + 3) / 4; hipLaunchKernelGGL(( kNormalizeDeltas_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta); LAUNCHERROR("kNormalizeDeltas_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Output result if (tgx == 0) pMagnitude[dpos] = r2; } } void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; hipLaunchKernelGGL(( kCalculateDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, batch, stride, pDelta, pMagnitude); LAUNCHERROR("kCalculateDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Normalalize vector if too large NNFloat r2 = pMagnitude[dpos]; if (r2 > norm * norm) { norm *= rsqrt(r2); uint32_t pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; hipLaunchKernelGGL(( kNormalizeDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta, pMagnitude); LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat s = pSrc[pos]; NNFloat sdelta = pSrcDelta[pos]; NNFloat d = pDst[pos]; NNFloat delta = (s == d) ? 
sdelta : (NNFloat)0; if (beta == (NNFloat)0) pDstDelta[pos] = delta; else if (delta != (NNFloat)0.0) pDstDelta[pos] = beta * pDstDelta[pos] + delta; } } void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { unsigned long blocks = CalculateBlocks(size); hipLaunchKernelGGL(( kCalculateMaxoutDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pSrc, pSrcDelta, size, beta, pDst, pDstDelta); LAUNCHERROR("kCalculateMaxoutDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateCosineDelta_kernel(NNFloat* pDPDelta, NNFloat* pDP, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDP += blockIdx.x * stride; pA += blockIdx.x * stride; pB += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dp = *pDP; NNFloat dpDelta = *pDPDelta; NNFloat a = *pA; NNFloat b = *pB; NNFloat ab = a * b; NNFloat a2 = a * a; NNFloat b2 = b * b; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * ((bi / ab) - (ai * dp / a2)); NNFloat delta = dpDelta * ((ai / ab) - (bi * dp / b2)); if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateCosineDelta(NNFloat* pDPDeltaIn, NNFloat* pDPIn, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = ::min(stride, getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateCosineDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pDPDeltaIn, pDPIn, pA, pB, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateCosineDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDotProductDelta_kernel(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dpDelta = *pDPDelta; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * bi; NNFloat delta = dpDelta * ai; if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateDotProductDelta(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, 
NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = ::min(stride, getGpu()._threadsPerBlock); hipLaunchKernelGGL(( kCalculateDotProductDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pDPDelta, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateDotProductDelta_kernel"); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. #define EXPLICITLY_INSTANTIATE_KERNELS(T) \ template void kCalculateL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateIndexedCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \ template void kCalculateScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \ template void kCalculateOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateIndexedHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \ template void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template void kCalculateSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ template void kCalculateSparseAnalogL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedSparseAnalogL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, 
NNFloat); \ /**/ EXPLICITLY_INSTANTIATE_KERNELS(NNFloat) EXPLICITLY_INSTANTIATE_KERNELS(double) EXPLICITLY_INSTANTIATE_KERNELS(unsigned char) EXPLICITLY_INSTANTIATE_KERNELS(char) EXPLICITLY_INSTANTIATE_KERNELS(uint32_t) EXPLICITLY_INSTANTIATE_KERNELS(uint64_t) EXPLICITLY_INSTANTIATE_KERNELS(int32_t) EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
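// Editor's note (not part of the original source): kNormalizeDeltas_kernel and
// kCalculateDeltaMagnitudes_kernel above accumulate a per-lane sum of squares and then
// invoke a REDUCE(r2) macro, defined elsewhere (presumably in GpuTypes.h), to combine
// the partial sums across the lanes of a warp. The minimal sketch below shows an
// equivalent warp-level reduction; warpReduceSum is an illustrative name, not the
// library's macro, and a fixed warp size of 32 is assumed.

__device__ __forceinline__ float warpReduceSum(float x)
{
    // Butterfly reduction: after log2(32) = 5 shuffle steps every lane holds the full warp sum.
    for (int offset = 16; offset > 0; offset >>= 1)
        x += __shfl_xor_sync(0xffffffff, x, offset);
    return x;
}

// Used in place of REDUCE(r2), the normalization test would then read:
//     r2 = warpReduceSum(r2);
//     if (r2 > norm * norm) { norm *= rsqrt(r2); /* rescale pDelta by norm */ }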
956331a1d1264f54a54974ba9d9f52a17589e437.cu
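// Editor's note (not part of the original source): the output-delta kernels in the file
// below are all launched with a two-dimensional grid,
// dim3 grid(batch, (stride + threadsPerBlock - 1) / threadsPerBlock), so blockIdx.x
// selects the example and the grid's y-dimension tiles the layer stride. The
// self-contained sketch here illustrates that mapping with a toy linear delta (a - t);
// kToyLinearDelta_kernel, launchToyLinearDelta, and the block size of 128 are
// illustrative assumptions, not DSSTNE APIs.

#include <cstdint>

__global__ void kToyLinearDelta_kernel(uint32_t stride, const float* pUnit, const float* pData, float* pDelta)
{
    uint64_t pos = (uint64_t)blockIdx.y * blockDim.x + threadIdx.x;       // unit within the example
    if (pos < stride)
    {
        uint64_t offset = (uint64_t)blockIdx.x * stride;                  // blockIdx.x selects the example (row)
        pDelta[offset + pos] = pUnit[offset + pos] - pData[offset + pos]; // linear output delta: a - t
    }
}

void launchToyLinearDelta(uint32_t batch, uint32_t stride, const float* pUnit, const float* pData, float* pDelta)
{
    const uint32_t threadsPerBlock = 128;                                 // stand-in for getGpu()._threadsPerBlock
    dim3 grid(batch, (stride + threadsPerBlock - 1) / threadsPerBlock);   // same grid shape as the kernels below
    kToyLinearDelta_kernel<<<grid, threadsPerBlock>>>(stride, pUnit, pData, pDelta);
}

// In the real kernels, unsigned char and char targets are additionally rescaled by
// 1/256 and 1/128 before forming (a - t), and cData._pShuffleIndex optionally remaps
// the example index when index shuffling is enabled.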
/* Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "GpuTypes.h" #include "NNTypes.h" #include <limits> static __constant__ GpuData cData; void SetKDeltaGpuData() { cudaError_t status; status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); RTERROR(status, "cudaMemcpyToSymbol: SetKDeltaGpuData copy to cData failed"); } void GetKDeltaGpuData() { cudaError_t status; status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); RTERROR(status, "cudaMemcpyFromSymbol: GetKDeltaGpuData copy From cData failed"); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel"); break; case Tanh: kCalculateTanhOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhOutputDelta_kernel"); break; case Linear: kCalculateLinearOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearOutputDelta_kernel"); break; case RectifiedLinear: kCalculateRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUOutputDelta_kernel"); break; case ExponentialLinear: kCalculateELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUOutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateSELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUOutputDelta_kernel"); break; case SoftMax: kCalculateSoftMaxOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t) * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = a - t; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = a - t; } } template<typename T> void kCalculateIndexedOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateIndexedSigmoidOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidOutputDelta_kernel"); break; case Tanh: kCalculateIndexedTanhOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedTanhOutputDelta_kernel"); break; case Linear: kCalculateIndexedLinearOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedLinearOutputDelta_kernel"); break; case RectifiedLinear: kCalculateIndexedRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateIndexedLRELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, slope); LAUNCHERROR("kCalculateIndexedLRELUOutputDelta_kernel"); break; case ExponentialLinear: kCalculateIndexedELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha); LAUNCHERROR("kCalculateIndexedELUOutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateIndexedSELUOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSELUOutputDelta_kernel"); break; case SoftMax: kCalculateIndexedSoftMaxOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> void kCalculateL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidL2HingeOutputDelta_kernel"); break; case Tanh: kCalculateTanhL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhL2HingeOutputDelta_kernel"); break; case Linear: kCalculateLinearL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: kCalculateRELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: kCalculateELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateSELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUL2HingeOutputDelta_kernel"); break; case SoftMax: kCalculateSoftMaxL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
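/*
    Illustrative sketch, not from the original source: the unsigned char and char kernel
    specializations above rescale 8-bit targets before comparing them with the unit value.
    A minimal host-side equivalent is shown below, assuming the usual convention that plain
    char is signed; the function name is hypothetical.

        #include <cstdint>

        // Mirrors the scaling in the 8-bit kernel specializations:
        // unsigned targets are interpreted as t / 256, signed ones as t / 128.
        static inline float dequantizeTarget(uint8_t t) { return (float)t * (1.0f / 256.0f); }
        static inline float dequantizeTarget(int8_t t)  { return (float)t * (1.0f / 128.0f); }
*/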
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) + (a < (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff * ((a >= (NNFloat)0.0) * lambda + (a < (NNFloat)0.0) * (lambda * alpha * exp(a))); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[uOffset + pos] = diff; } } template<typename T> void kCalculateIndexedL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateIndexedSigmoidL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidL2HingeOutputDelta_kernel"); break; case Tanh: kCalculateIndexedTanhL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedTanhL2HingeOutputDelta_kernel"); break; case Linear: kCalculateIndexedLinearL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: kCalculateIndexedRELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateIndexedLRELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, slope); LAUNCHERROR("kCalculateIndexedLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: kCalculateIndexedELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha); LAUNCHERROR("kCalculateIndexedELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateIndexedSELUL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSELUL2HIngeOutputDelta_kernel"); break; case SoftMax: kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = pData[pos]; pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0); pDelta[pos] = (a < (NNFloat)0.0) ? 
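/*
    Illustrative sketch, not part of the original file: the dense delta dispatchers above
    launch a two-dimensional grid in which blockIdx.x selects the example and blockIdx.y
    tiles the output stride, whereas the hinge kernels in this block use one block per
    example and let the threads stride across the row. The two toy kernels below show only
    the index mapping; their names and the threadsPerBlock placeholder are hypothetical.

        #include <cstdint>

        // Mapping used with dim3 grid(batch, (stride + threadsPerBlock - 1) / threadsPerBlock):
        // blockIdx.x is the example, blockIdx.y tiles the row.
        __global__ void rowTiledSketch_kernel(float* pOut, uint32_t stride)
        {
            uint64_t pos = (uint64_t)blockIdx.y * blockDim.x + threadIdx.x;
            if (pos < stride)
                pOut[(uint64_t)blockIdx.x * stride + pos] = 0.0f;
        }

        // Mapping used with <<<batch, threads>>>: one block per example, threads loop over the row.
        __global__ void rowLoopSketch_kernel(float* pOut, uint32_t stride)
        {
            float* pRow = pOut + (uint64_t)blockIdx.x * stride;
            for (uint64_t pos = threadIdx.x; pos < stride; pos += blockDim.x)
                pRow[pos] = 0.0f;
        }
*/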
-t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0); pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<typename T> void kCalculateHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock)); kCalculateHingeOutputDelta_kernel<<<batch, threads>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateHingeOutputDelta_kernel"); } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = pData[pos]; pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 256.0); pDelta[pos] = (a < (NNFloat)0.0) ? -t : (NNFloat)0.0; pos += blockDim.x; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedHingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = threadIdx.x; uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; pUnit += uOffset; pDelta += uOffset; pData += dOffset; while (pos < stride) { NNFloat a = pUnit[pos]; NNFloat t = (NNFloat)pData[pos] * (NNFloat)(1.0 / 128.0); pDelta[pos] = (a < (NNFloat)0.0) ? 
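/*
    Illustrative sketch, not from the original source: the hinge kernels in this block write
    -t while the unit is still negative and zero otherwise. A plain host-side restatement,
    with float in place of NNFloat and a hypothetical function name.

        // Per-element rule of kCalculateHingeOutputDelta_kernel and its indexed twin;
        // the 8-bit specializations first rescale t as shown earlier in the file.
        static float hingeDelta(float a, float t)
        {
            return (a < 0.0f) ? -t : 0.0f;
        }
*/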
-t : (NNFloat)0.0; pos += blockDim.x; } } template<typename T> void kCalculateIndexedHingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { unsigned long threads = max(32, min(stride, getGpu()._threadsPerBlock)); kCalculateIndexedHingeOutputDelta_kernel<<<batch, threads>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateHingeOutputDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = (pSparseWeight != NULL) ? 
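/*
    Illustrative sketch, not part of the original kernels: the SparseNonZero* kernels assign
    one warp per example and walk the example's slice of the compressed index list in
    lane-sized strides. The toy kernel below reproduces only that traversal; warpSz and
    laneMask are passed as parameters rather than read from cData, and all names are
    hypothetical.

        #include <cstdint>

        __global__ void warpPerExampleSketch_kernel(const uint64_t* pSparseStart, const uint64_t* pSparseEnd,
                                                    const uint32_t* pSparseIndex, float* pDelta,
                                                    uint32_t stride, uint32_t batch,
                                                    uint32_t warpSz, uint32_t laneMask)
        {
            uint64_t row = ((uint64_t)blockIdx.x * blockDim.x + threadIdx.x) / warpSz;
            if (row < batch)
            {
                uint64_t pos1 = pSparseStart[row] + (threadIdx.x & laneMask);   // this lane's first entry
                uint64_t end  = pSparseEnd[row];
                float* pRow   = pDelta + row * stride;
                while (pos1 < end)
                {
                    pRow[pSparseIndex[pos1]] = 0.0f;   // touch only the example's non-zero columns
                    pos1 += warpSz;                    // stride by the warp size
                }
            }
        }
*/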
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
            pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = a - w;
            pos1 += cData._warpSize;
        }
    }
}

void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
            }
            kCalculateSparseNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroSigmoidOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
            }
            kCalculateSparseNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
            }
            kCalculateSparseNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
            }
            kCalculateSparseNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroRELUOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope);
                LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
            }
            kCalculateSparseNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope);
            LAUNCHERROR("kCalculateSparseNonZeroLRELUOutputDelta_kernel");
            break;

        case ExponentialLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha);
LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } kCalculateSparseNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } kCalculateSparseNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
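/*
    Illustrative sketch, not from the original source: a CPU restatement of the control flow
    of kCalculateSparseOutputDelta for the Sigmoid case, assuming no index shuffling, unit
    sparse weights and delta boosts of 1. A dense raw pass treats every target as zero (or
    the delta is simply cleared when bSparseIgnoreZero is set), then the sparse pass
    overwrites the listed columns with the target-equals-one form. The function name is
    hypothetical.

        #include <cstdint>
        #include <cstring>
        #include <vector>

        static void sparseSigmoidDeltaReference(uint32_t batch, uint32_t stride,
                                                const float* pUnit, float* pDelta,
                                                const std::vector<uint64_t>& start,
                                                const std::vector<uint64_t>& end,
                                                const std::vector<uint32_t>& index,
                                                bool bSparseIgnoreZero)
        {
            uint64_t size = (uint64_t)batch * stride;
            if (bSparseIgnoreZero)
                std::memset(pDelta, 0, size * sizeof(float));
            else
                for (uint64_t i = 0; i < size; i++)                 // raw pass: implicit target 0
                {
                    float a   = pUnit[i];
                    pDelta[i] = a * a * (1.0f - a);
                }

            for (uint32_t row = 0; row < batch; row++)              // non-zero pass: implicit target 1
                for (uint64_t k = start[row]; k < end[row]; k++)
                {
                    uint64_t i = (uint64_t)row * stride + index[k];
                    float a    = pUnit[i];
                    pDelta[i]  = (a - 1.0f) * a * (1.0f - a);
                }
        }
*/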
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
            pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = a - w;
            pos1 += cData._warpSize;
        }
    }
}

void kCalculateIndexedSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope);
                LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroLRELUOutputDelta_kernel");
            break;

        case ExponentialLinear:
            if (!bSparseIgnoreZero)
            {
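/*
    Illustrative sketch, not part of the original file: the Indexed* kernels add one level of
    indirection, first resolving the (optionally shuffled) example position and then mapping
    it through pIndex to the row of the backing data set. The helper below restates that
    lookup on the host; its name is hypothetical.

        #include <cstdint>

        static uint32_t resolveDataRow(uint32_t position, uint32_t batchRow,
                                       bool bShuffleIndices, const uint32_t* pShuffleIndex,
                                       const uint32_t* pIndex)
        {
            uint32_t example = position + batchRow;      // absolute example position
            if (bShuffleIndices)
                example = pShuffleIndex[example];        // optional shuffle, as in cData._pShuffleIndex
            return pIndex[example];                      // row of the underlying data
        }
*/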
kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateIndexedSparseNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
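/*
    Illustrative sketch, not from the original source: the SparseAnalog* kernels in this
    block read an explicit target value per non-zero entry (pSparseData, parallel to
    pSparseIndex) instead of the implicit 1.0 used by the plain sparse kernels. The toy
    CSR-style container below, with made-up values, shows the expected layout; the type and
    helper are hypothetical.

        #include <cstdint>
        #include <vector>

        // Example i owns entries [start[i], end[i]) of index/value;
        // value[k] is the analog target for column index[k].
        struct SparseAnalogSetSketch
        {
            std::vector<uint64_t> start;   // pSparseStart
            std::vector<uint64_t> end;     // pSparseEnd
            std::vector<uint32_t> index;   // pSparseIndex
            std::vector<float>    value;   // pSparseData
        };

        // Two examples: row 0 -> {(3, 0.25f), (7, 0.5f)}, row 1 -> {(1, 1.0f)}.
        static SparseAnalogSetSketch makeToyAnalogSet()
        {
            return { {0, 2}, {2, 3}, {3, 7, 1}, {0.25f, 0.5f, 1.0f} };
        }
*/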
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
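/*
  All of the NonZero kernels in this file share one traversal pattern: each warp owns one
  example of the batch and its lanes cooperatively walk that example's CSR-style index
  range [pSparseStart[dpos], pSparseEnd[dpos]), striding by the warp size. A minimal
  sketch of just that pattern (the kernel name and loop body are illustrative
  placeholders, not part of the library):

      __global__ void kSparseRowPerWarp_sketch(uint64_t* pSparseStart, uint64_t* pSparseEnd,
                                               uint32_t* pSparseIndex, uint32_t batch)
      {
          uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;  // warp id == batch row
          if (pos < batch)
          {
              uint64_t pos1 = pSparseStart[pos] + (threadIdx.x & cData._warpMask);     // this lane's first nonzero
              uint64_t end  = pSparseEnd[pos];
              while (pos1 < end)
              {
                  uint32_t column = pSparseIndex[pos1];  // output column touched by this lane
                  // ... per-nonzero work goes here ...
                  pos1 += cData._warpSize;               // stride to this lane's next nonzero
              }
          }
      }
*/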
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
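/*
  The per-activation factor multiplying (a - t) in these kernels is just f'(z) written in
  terms of the activation value a = f(z); the stored quantity is the usual output delta
  dE/dz = (a - t) * f'(z). For reference (standard results, stated here for readability):

      sigmoid      f'(z) = a * (1 - a)
      tanh         f'(z) = 1 - a * a
      linear       f'(z) = 1
      ReLU         f'(z) = (a > 0)
      leaky ReLU   f'(z) = (a > 0) + slope * (a <= 0)
*/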
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
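/*
  The ELU factor (a + alpha) on the non-positive branch follows from rewriting the
  derivative in terms of the output: for z <= 0, a = alpha * (exp(z) - 1), so
  f'(z) = alpha * exp(z) = a + alpha. Expressing it this way lets the kernels above avoid
  recomputing exp() on the negative branch.
*/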
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
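/*
  Note the branchless selection idiom used throughout these kernels: a comparison such as
  (a > (NNFloat)0.0) evaluates to 1.0 or 0.0, so an expression of the form
  (a > 0) * x + (a <= 0) * y picks x or y with plain arithmetic rather than an if/else.
  A self-contained illustration (the helper name is hypothetical, not part of the library):

      __device__ __forceinline__ NNFloat kSelectBySign_sketch(NNFloat a, NNFloat x, NNFloat y)
      {
          return (a > (NNFloat)0.0) * x + (a <= (NNFloat)0.0) * y;   // x when a > 0, otherwise y
      }
*/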
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
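/*
  The SoftMax kernels apply no extra derivative factor: assuming the usual softmax plus
  cross-entropy pairing, the gradient with respect to the pre-activation simplifies to
  dE/dz_i = a_i - t_i, so these kernels store only w * (a - t).
*/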
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if 
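/*
  kCalculateSparseAnalogOutputDelta ties the kernels above together. When bSparseIgnoreZero
  is set, pDelta is cleared and only the listed nonzero targets are written; otherwise the
  matching Raw kernel first fills every output with the zero-target delta and the NonZero
  kernel then overwrites the active columns. A hedged host-side sketch of a call; the
  buffers and sizes below are placeholders, not values taken from this library:

      // NNFloat *pUnit, *pDelta, *pSparseWeight; uint64_t *pSparseStart, *pSparseEnd;
      // uint32_t *pSparseIndex; NNFloat *pSparseData;   // device buffers, batch x stride layout
      kCalculateSparseAnalogOutputDelta(Sigmoid,
                                        0, 512, 1024,            // position, batch, stride
                                        pUnit, pDelta,
                                        pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight,
                                        pSparseData,
                                        true,                    // bSparseIgnoreZero
                                        (NNFloat)0.0, (NNFloat)0.0, (NNFloat)0.0);  // slope, alpha, lambda (unused here)
*/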
(!bSparseIgnoreZero) { kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateSparseAnalogNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
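/*
  The Indexed variants that begin here differ from the kernels above only in how the
  example is located: an extra pIndex array maps the batch row to the underlying datapoint
  before its sparse row is fetched,

      dpos = pIndex[shuffled(position + pos)]   instead of   dpos = shuffled(position + pos),

  where shuffled() stands for the optional cData._pShuffleIndex lookup. Everything else
  (warp-per-row traversal, activation factors, 8-bit rescaling) is unchanged.
*/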
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t *pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSigmoidOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroTanhOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLinearOutputDelta_kernel"); break; case RectifiedLinear: 
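/*
  Grid sizing in these dispatch routines, for reference: grid1 launches one thread per
  output element (batch * stride of them) for the Raw kernels, while grid2 launches one
  warp per example (batch * warpSize threads) for the NonZero kernels. A worked example,
  assuming CalculateBlocks() is a ceiling division by _threadsPerBlock = 128 and a warp
  size of 32, with batch = 512 and stride = 1024:

      grid1 = ceil(512 * 1024 / 128) = 4096 blocks
      grid2 = ceil(512 * 32 / 128)   =  128 blocks
*/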
if (!bSparseIgnoreZero) { kCalculateSparseRawRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroRELUOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLRELUOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroELUOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSELUOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSoftMaxOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? 
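/*
  The L2 hinge kernels in this block replace the plain difference (a - t) with a one-sided
  error before applying the same activation derivatives: for the implicit zero targets
  handled by the Raw kernels only positive activations are penalized, diff = max(0, a),
  while for the active positions handled by the NonZero kernels only a shortfall below the
  target of 1 is penalized, diff = min(0, a - 1). For example, a = 0.7 at an active output
  gives diff = -0.3, and a = 0.2 at an inactive output gives diff = 0.2.
*/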
cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * (a > (NNFloat)0.0); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = max((NNFloat)0.0, pUnit[pos]); pDelta[pos] = w * a * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = (NNFloat)1.0; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
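/*
  In the SoftMax L2 hinge NonZero kernel below, when no explicit sparse weight is supplied
  the per-example weight defaults to 1 / (number of active outputs), so the unit target
  mass is spread uniformly over the listed columns and the stored delta is min(0, a - 1/k).
  For example, with k = 4 active outputs each target is 0.25, and a = 0.1 yields a delta
  of -0.15.
*/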
cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = max((NNFloat)0.0, pUnit[pos]);
        pDelta[pos] = w * a;
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos];
        uint64_t end = pSparseEnd[dpos];
        // Without explicit sparse weights, the SoftMax target defaults to a uniform 1 / (number of non-zero outputs)
        NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat diff = min((NNFloat)0.0, a - w);
            pDelta[pos2] = diff;
            pos1 += cData._warpSize;
        }
    }
}

void kCalculateSparseL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel");
            }
            kCalculateSparseNonZeroSigmoidL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroSigmoidL2HingeOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawTanhL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel");
            }
            kCalculateSparseNonZeroTanhL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroTanhL2HingeOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLinearL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel");
            }
            kCalculateSparseNonZeroLinearL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateSparseNonZeroLinearL2HingeOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel");
            }
            kCalculateSparseNonZeroRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride,
pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel"); } kCalculateSparseNonZeroLRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateSparseNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } kCalculateSparseNonZeroELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUl2HingeOutputDelta_kernel"); } kCalculateSparseNonZeroSELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
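        // Non-zero (target == 1) outputs are scaled by the delta boost; the weight falls back to 1.0 without sparse weights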
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
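        // Indexed variant: translate the (optionally shuffled) batch slot through pIndex to the underlying example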
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat diff = min((NNFloat)0.0, a - (NNFloat)1.0); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1);
        pos1 += threadIdx.x & cData._warpMask;
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            NNFloat diff = min((NNFloat)0.0, a - w);
            pDelta[pos2] = diff;
            pos1 += cData._warpSize;
        }
    }
}

void kCalculateIndexedSparseL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroSigmoidL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidL2HingeOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawTanhL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroTanhL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhL2HingeOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLinearL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroLinearL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearL2HingeOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight);
            LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUL2HingeOutputDelta_kernel");
            break;

        case LeakyRectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope);
                LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel");
            }
            kCalculateIndexedSparseNonZeroLRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd,
pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateIndexedSparseNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateIndexedSparseNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSoftMaxL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
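            // L2 hinge: positive targets penalize only undershoot (diff clamped to <= 0), non-positive targets only overshoot (diff clamped to >= 0)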
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
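            // Linear outputs use the clamped hinge error directly as the delta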
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
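            // Leaky ReLU backprop factor: 1 where the output is active, `slope` where it is not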
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
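            // ELU derivative expressed via the output: 1 for a > 0, (a + alpha) otherwise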
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
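            // SELU branch scales by lambda when active and by lambda * alpha * exp(a) otherwise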
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff);
            pDelta[pos2] = w * diff;
            pos1 += cData._warpSize;
        }
    }
}

template<>
__global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            // signed char targets are rescaled by 1/128 before the hinge comparison
            NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
            NNFloat diff = a - fabsf(t);
            diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff);
            pDelta[pos2] = w * diff;
            pos1 += cData._warpSize;
        }
    }
}

template<typename T>
void kCalculateSparseAnalogL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda)
{
    uint64_t size = (uint64_t)batch * (uint64_t)stride;
    dim3 grid1(CalculateBlocks(size));
    dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));

    // Clear entire delta if ignoring zero outputs
    if (bSparseIgnoreZero)
    {
        hipMemset(pDelta, 0, size * sizeof(NNFloat));
    }

    switch (activation)
    {
        case Sigmoid:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel");
            }
            kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel");
            break;

        case Tanh:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawTanhL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel");
            }
            kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel");
            break;

        case Linear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawLinearL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta);
                LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel");
            }
            kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData);
            LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel");
            break;

        case RectifiedLinear:
            if (!bSparseIgnoreZero)
            {
                kCalculateSparseRawRELUL2HingeOutputDelta_kernel<<<grid1,
getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); LAUNCHERROR("kCalculateSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateSparseAnalogNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
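            // Scale the clamped hinge error by the Tanh derivative (1 - a^2)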
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t *pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
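// ReLU derivative gates the delta: only units with positive activation receive gradient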
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * (a > (NNFloat)0.0); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
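// leaky ReLU derivative: 1 where the unit is active, otherwise the configured slope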
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
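// ELU derivative: 1 for a > 0; for a <= 0 it equals alpha * exp(x) = a + alpha, recovered from the activation itself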
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, unsigned char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat diff = a - fabsf(t); diff = (t > (T)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat *pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); NNFloat diff = a - fabsf(t); diff = (t > (NNFloat)0.0) ? 
min((NNFloat)0.0f, diff) : max((NNFloat)0.0, diff); pDelta[pos2] = w * diff; pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseAnalogL2HingeOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSigmoidL2HingeOutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroTanhL2HingeOutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLinearL2HingeOutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroRELUL2HingeOutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, slope); 
LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroLRELUL2HingeOutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroELUL2HingeOutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSELUL2HingeOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxL2HingeOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonZeroSoftMaxL2HingeOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
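// cross-entropy through sigmoid/softmax reduces to (a - t); dOffset maps this batch row to its
// (possibly shuffled) example, and signed 8-bit targets are rescaled by 1/128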
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t); } } template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: case SoftMax: kCalculateSigmoidCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); pDelta[uOffset + pos] = (a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); pDelta[uOffset + pos] = (a - t); } } template<typename T> void kCalculateIndexedCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: case SoftMax: kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
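// undo any shuffle before looking up this example's sparse weight; the raw pass then writes
// deltaBoost_zero * weight * a for every unit, i.e. it assumes a zero target everywhere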
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * a; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel"); break; case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
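// the nonzero pass then overwrites each hit position with deltaBoost_one * (optional example weight) * (a - 1)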
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * (a - (NNFloat)1.0); pos1 += cData._warpSize; } } } void kCalculateIndexedSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxOutputDelta_kernel"); break; case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
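// analog targets stored as char are rescaled by 1/128 into [-1, 1); the unsigned char specialization below uses 1/256 for [0, 1)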
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, unsigned char* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint64_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint32_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0); pDelta[pos2] = w * (a - t); pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case SoftMax: case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel"); } kCalculateIndexedSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, pSparseData); LAUNCHERROR("kCalculateIndexedSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0); NNFloat output = (NNFloat)0.0; if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget)) output = cData._SMCE_oneScale * (a - t); else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget)) output = cData._SMCE_zeroScale * (a - t); pDelta[uOffset + pos] = output; } } template<typename T> void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._SMCE_zeroScale; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = w * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? 
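// scaled-marginal CE: a hit unit only receives gradient while its activation is still below _SMCE_oneTarget,
// scaled by _SMCE_oneScale (zero-target units use _SMCE_zeroTarget/_SMCE_zeroScale in the raw kernel above)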
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = w * (a - (NNFloat)1.0); pDelta[pos2] = output; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) output = cData._SMCE_zeroScale * a; pDelta[pos] = output; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1)); uint64_t offset = pos * stride; pos1 += threadIdx.x & cData._warpMask; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = (a - w); pDelta[pos2] = output; pos1 += cData._warpSize; } } } void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t 
stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._SMCE_oneScale * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = w * (a - (NNFloat)1.0); pDelta[pos2] = output; pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos]; uint64_t end = pSparseEnd[dpos]; NNFloat w = (pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0 / (NNFloat)(end - pos1); uint64_t offset = pos * stride; pos1 += threadIdx.x & cData._warpMask; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) output = cData._SMCE_oneScale * (a - w); pDelta[pos2] = output; pos1 += cData._warpSize; } } } void kCalculateIndexedSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: if (!bSparseIgnoreZero) { kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); 
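            // The sparse delta launchers in this file follow a two-pass pattern: a "Raw" kernel (or, when
            // bSparseIgnoreZero is set, a memset of pDelta to zero) first writes the delta assuming a zero
            // target for every output, and a "NonZero" kernel then overwrites the entries addressed by
            // pSparseIndex with the delta for the labeled (non-zero) targets. LAUNCHERROR is assumed to
            // query the last device error and report it under the kernel name passed in, so it is kept
            // directly after each launch.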
LAUNCHERROR("kCalculateIndexedSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat a = pUnit[pos]; NNFloat output = (NNFloat)0.0; if (a > cData._SMCE_zeroTarget) { output = cData._SMCE_zeroScale * a; } pDelta[pos] = output; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) { output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0); } pDelta[pos2] = output; pos1 += cData._warpSize; } } } template<typename T> void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: cout << "unsupported activation for this cost function" << endl; getGpu().Shutdown(); exit(-1); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; T t = pSparseData[pos1]; NNFloat output = (NNFloat)0.0; if (a < cData._SMCE_oneTarget) { output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0); } pDelta[pos2] = output; pos1 += cData._warpSize; } } } template<typename T> void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel"); break; case SoftMax: cout << "unsupported activation for this cost function" << endl; getGpu().Shutdown(); exit(-1); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a- t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateSigmoidL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel"); break; case Tanh: kCalculateTanhL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel"); break; case Linear: kCalculateLinearL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateLinearL1OutputDelta_kernel"); break; case RectifiedLinear: kCalculateRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData); LAUNCHERROR("kCalculateRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, slope); LAUNCHERROR("kCalculateLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: kCalculateELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha); LAUNCHERROR("kCalculateELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateSELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData, alpha, lambda); LAUNCHERROR("kCalculateSELUL1OutputDelta_kernel"); break; } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos 
= (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<typename T> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = pData[dOffset + pos]; pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a- t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, unsigned char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0); pDelta[uOffset + pos] = sgn(a - t) * a * ((NNFloat)1.0 - a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((NNFloat)1.0 - a * a); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * (a > (NNFloat)0.0); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat slope) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha)); } } template<> __global__ void LAUNCH_BOUNDS() kCalculateIndexedSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, char* pData, NNFloat alpha, NNFloat lambda) { uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; if (pos < stride) { uint64_t uOffset = blockIdx.x * stride; uint64_t dOffset = pIndex[cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x] * stride; NNFloat a = pUnit[uOffset + pos]; NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0); pDelta[uOffset + pos] = sgn(a - t) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } template<typename T> void kCalculateIndexedL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, T* pData, NNFloat slope, NNFloat alpha, NNFloat lambda) { dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); switch (activation) { case Sigmoid: kCalculateIndexedSigmoidL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedSigmoidL1OutputDelta_kernel"); break; case Tanh: kCalculateIndexedTanhL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedTanhL1OutputDelta_kernel"); break; case Linear: kCalculateIndexedLinearL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedLinearL1OutputDelta_kernel"); break; case RectifiedLinear: kCalculateIndexedRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData); LAUNCHERROR("kCalculateIndexedRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: kCalculateIndexedLRELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, slope); LAUNCHERROR("kCalculateIndexedLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: kCalculateIndexedELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha); LAUNCHERROR("kCalculateIndexedELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: kCalculateIndexedSELUL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pData, alpha, lambda); LAUNCHERROR("kCalculateIndexedSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * a * ((NNFloat)1.0 - a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawTanhL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((NNFloat)1.0 - a * a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLinearL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawRELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? 
cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = pUnit[pos];
        pDelta[pos] = w * (a > (NNFloat)0.0);
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0);
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseRawELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha)
{
    uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (pos < size)
    {
        NNFloat w = cData._deltaBoost_zero;
        if (pSparseWeight != NULL)
        {
            uint64_t dpos = (pos / stride) + position;
            dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos;
            w *= pSparseWeight[dpos];
        }
        NNFloat a = pUnit[pos];
        pDelta[pos] = w * sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            // Non-zero sparse entries have a target of 1, so the L1 subgradient uses sgn(a - 1)
            pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateSparseRawSELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda)
{
    uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (pos < size)
    {
        NNFloat w = cData._deltaBoost_zero;
        if (pSparseWeight != NULL)
        {
            uint64_t dpos = (pos / stride) + position;
            dpos = cData._bShuffleIndices ?
cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a)); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateSparseRawLRELUL1OutputDelta_kernel(uint32_t position, NNFloat* pSparseWeight, uint32_t stride, uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; if (pos < size) { NNFloat w = cData._deltaBoost_zero; if (pSparseWeight != NULL) { uint64_t dpos = (pos / stride) + position; dpos = cData._bShuffleIndices ? cData._pShuffleIndex[dpos] : dpos; w *= pSparseWeight[dpos]; } NNFloat a = pUnit[pos]; pDelta[pos] = w * sgn(a) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); } } __global__ void LAUNCH_BOUNDS() kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel"); } kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel"); } kCalculateSparseNonZeroTanhL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroTanhL1OutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel"); } kCalculateSparseNonZeroLinearL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroLinearL1OutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateSparseNonZeroRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateSparseNonZeroRawLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, 
pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateSparseNonZeroELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel"); } kCalculateSparseNonZeroSELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateSparseNonZeroSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a); pos1 += cData._warpSize; } } } __global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight) { uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize; if (pos < batch) { uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? 
pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = w * sgn(a - (NNFloat)1.0);
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * (a > (NNFloat)0.0);
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            // Non-zero sparse entries have a target of 1, so the L1 subgradient uses sgn(a - 1)
            pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * (a + alpha));
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat alpha, NNFloat lambda)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos];
        uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
        uint64_t end = pSparseEnd[dpos];
        NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0);
        uint64_t offset = pos * stride;
        while (pos1 < end)
        {
            uint64_t pos2 = offset + pSparseIndex[pos1];
            NNFloat a = pUnit[pos2];
            pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) * lambda + (a <= (NNFloat)0.0) * lambda * alpha * exp(a));
            pos1 += cData._warpSize;
        }
    }
}

__global__ void LAUNCH_BOUNDS() kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, NNFloat slope)
{
    uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
    if (pos < batch)
    {
        uint32_t dpos = pIndex[cData._bShuffleIndices ?
cData._pShuffleIndex[position + pos] : position + pos]; uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask); uint64_t end = pSparseEnd[dpos]; NNFloat w = cData._deltaBoost_one * ((pSparseWeight != NULL) ? pSparseWeight[dpos] : (NNFloat)1.0); uint64_t offset = pos * stride; while (pos1 < end) { uint64_t pos2 = offset + pSparseIndex[pos1]; NNFloat a = pUnit[pos2]; pDelta[pos2] = w * sgn(a - (NNFloat)1.0) * ((a > (NNFloat)0.0) + (a <= (NNFloat)0.0) * slope); pos1 += cData._warpSize; } } } void kCalculateIndexedSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint32_t* pIndex, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, NNFloat* pSparseWeight, bool bSparseIgnoreZero, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint64_t size = (uint64_t)batch * (uint64_t)stride; dim3 grid1(CalculateBlocks(size)); dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize)); // Clear entire delta if ignoring zero outputs if (bSparseIgnoreZero) { cudaMemset(pDelta, 0, size * sizeof(NNFloat)); } switch (activation) { case Sigmoid: if (!bSparseIgnoreZero) { kCalculateSparseRawSigmoidL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawSigmoidL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroSigmoidL1OutputDelta_kernel"); break; case Tanh: if (!bSparseIgnoreZero) { kCalculateSparseRawTanhL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawTanhL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroTanhL1OutputDelta_kernel"); break; case Linear: if (!bSparseIgnoreZero) { kCalculateSparseRawLinearL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawLinearL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroLinearL1OutputDelta_kernel"); break; case RectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta); LAUNCHERROR("kCalculateSparseRawRELUL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight); LAUNCHERROR("kCalculateIndexedSparseNonZeroRELUL1OutputDelta_kernel"); break; case LeakyRectifiedLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawLRELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateSparseRawLRELUL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel<<<grid2, 
getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, slope); LAUNCHERROR("kCalculateIndexedSparseNonZeroRawLRELUL1OutputDelta_kernel"); break; case ExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateSparseRawELUL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha); LAUNCHERROR("kCalculateIndexedSparseNonZeroELUL1OutputDelta_kernel"); break; case ScaledExponentialLinear: if (!bSparseIgnoreZero) { kCalculateSparseRawSELUL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(position, pSparseWeight, stride, size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSparseRawSELUL1OutputDelta_kernel"); } kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pIndex, pSparseStart, pSparseEnd, pSparseIndex, pSparseWeight, alpha, lambda); LAUNCHERROR("kCalculateIndexedSparseNonZeroSELUL1OutputDelta_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; // Calculate sum of activations if (pos < stride) { NNFloat pi = (NNFloat)0.0; for (int i = 0; i < batch; i++) { pi += pUnit[pos]; pos += stride; } // Calculate sparseness penalty pi /= (NNFloat)batch; pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi)); NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi)); // Apply sparseness penalty to deltas pos = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < batch; i++) { pDelta[pos] += penalty; pos += stride; } } } // Calculates and applies sparseness penalty to hidden layers void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta) { dim3 grid1(CalculateBlocks(stride)); kCalculateSparsenessPenalty_kernel<<<grid1, getGpu()._threadsPerBlock>>>(batch, stride, pUnit, pDelta, p, beta); LAUNCHERROR("kCalculateSparsenessPenalty_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; pDelta[pos] = x * ((NNFloat)1.0 - x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat d = pDelta[pos]; x *= oneOverScale; pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d; } } __global__ void LAUNCH_BOUNDS() kCalculateRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] = (NNFloat)0.0; } } __global__ void LAUNCH_BOUNDS() kCalculateLRELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = 
pUnit[pos]; if (x <= (NNFloat)0.0) { pDelta[pos] *= slope; } } } __global__ void LAUNCH_BOUNDS() kCalculateELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; if (x <= (NNFloat)0.0) pDelta[pos] *= (x + alpha); } } __global__ void LAUNCH_BOUNDS() kCalculateSELUHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat alpha, NNFloat lambda) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat x = pUnit[pos]; NNFloat delta = pDelta[pos]; if (x > (NNFloat)0.0) { delta *= lambda; } else { delta *= (x + lambda * alpha); } pDelta[pos] = delta; } } void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta, NNFloat slope, NNFloat alpha, NNFloat lambda) { uint32_t blocks = CalculateBlocks(size); NNFloat oneOverScale = (NNFloat)1.0 / scale; switch (activation) { case Sigmoid: kCalculateSigmoidHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel"); break; case Tanh: kCalculateTanhHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, scale, oneOverScale); LAUNCHERROR("kCalculateTanhHadamardProduct_kernel"); break; case Linear: // Derivative of linear output is 1, no need to call any kernel here break; case RectifiedLinear: kCalculateRELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta); LAUNCHERROR("kCalculateRELUHadamardProduct_kernel"); break; case LeakyRectifiedLinear: kCalculateLRELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, slope); LAUNCHERROR("kCalculateLRELUHadamardProduct_kernel"); break; case ExponentialLinear: kCalculateELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha); LAUNCHERROR("kCalculateELUHadamardProduct_kernel"); break; case ScaledExponentialLinear: kCalculateSELUHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, alpha, lambda); LAUNCHERROR("kCalculateSELUHadamardProduct_kernel"); break; } } __global__ void LAUNCH_BOUNDS() kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += cData._warpSize; } // Reduce sum REDUCE(r2) // Normalalize vector if too large if (r2 > norm * norm) { norm *= rsqrt(r2); pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta) { uint32_t blocks = (batch + 3) / 4; kNormalizeDeltas_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta); LAUNCHERROR("kNormalizeDeltas_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Calculate vector length uint32_t pos = tgx; NNFloat r2 = (NNFloat)0.0; while (pos < stride) { NNFloat x = pDelta[pos]; r2 += x * x; pos += 
cData._warpSize; } // Reduce sum REDUCE(r2) // Output result if (tgx == 0) pMagnitude[dpos] = r2; } } void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; kCalculateDeltaMagnitudes_kernel<<<blocks, 128>>>(batch, stride, pDelta, pMagnitude); LAUNCHERROR("kCalculateDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; uint32_t tgx = threadIdx.x & cData._warpMask; pDelta += dpos * stride; if (dpos < batch) { // Normalalize vector if too large NNFloat r2 = pMagnitude[dpos]; if (r2 > norm * norm) { norm *= rsqrt(r2); uint32_t pos = tgx; while (pos < stride) { pDelta[pos] *= norm; pos += cData._warpSize; } } } } void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude) { uint32_t blocks = (batch + 3) / 4; kNormalizeDeltaMagnitudes_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta, pMagnitude); LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; if (pos < size) { NNFloat s = pSrc[pos]; NNFloat sdelta = pSrcDelta[pos]; NNFloat d = pDst[pos]; NNFloat delta = (s == d) ? sdelta : (NNFloat)0; if (beta == (NNFloat)0) pDstDelta[pos] = delta; else if (delta != (NNFloat)0.0) pDstDelta[pos] = beta * pDstDelta[pos] + delta; } } void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta) { unsigned long blocks = CalculateBlocks(size); kCalculateMaxoutDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pSrc, pSrcDelta, size, beta, pDst, pDstDelta); LAUNCHERROR("kCalculateMaxoutDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateCosineDelta_kernel(NNFloat* pDPDelta, NNFloat* pDP, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDP += blockIdx.x * stride; pA += blockIdx.x * stride; pB += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dp = *pDP; NNFloat dpDelta = *pDPDelta; NNFloat a = *pA; NNFloat b = *pB; NNFloat ab = a * b; NNFloat a2 = a * a; NNFloat b2 = b * b; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * ((bi / ab) - (ai * dp / a2)); NNFloat delta = dpDelta * ((ai / ab) - (bi * dp / b2)); if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateCosineDelta(NNFloat* pDPDeltaIn, NNFloat* pDPIn, NNFloat* pA, NNFloat* pB, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { 
unsigned long blocks = batch; unsigned long threadsPerBlock = std::min(stride, getGpu()._threadsPerBlock); kCalculateCosineDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pDPDeltaIn, pDPIn, pA, pB, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateCosineDelta_kernel"); } __global__ void LAUNCH_BOUNDS() kCalculateDotProductDelta_kernel(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { // Preincrement pointers p0Vector += blockIdx.x * inputStride + threadIdx.x; pVector += blockIdx.x * inputStride + threadIdx.x; pDPDelta += blockIdx.x * stride; pDelta0 += blockIdx.x * inputStride + threadIdx.x; pDelta += blockIdx.x * inputStride + threadIdx.x; uint32_t pos = threadIdx.x; NNFloat dpDelta = *pDPDelta; // Calculate deltas while (pos < inputStride) { NNFloat ai = *p0Vector; NNFloat bi = *pVector; NNFloat delta0 = dpDelta * bi; NNFloat delta = dpDelta * ai; if (beta0 == (NNFloat)0) *pDelta0 = delta0; else *pDelta0 = *pDelta0 + beta0 * delta0; if (beta == (NNFloat)0) *pDelta = delta; else *pDelta = *pDelta + beta * delta; pDelta0 += blockDim.x; pDelta += blockDim.x; p0Vector += blockDim.x; pVector += blockDim.x; pos += blockDim.x; } } void kCalculateDotProductDelta(NNFloat* pDPDelta, NNFloat* p0Vector, NNFloat* pVector, uint32_t batch, uint32_t stride, NNFloat* pDelta0, NNFloat beta0, NNFloat* pDelta, NNFloat beta, uint32_t inputStride) { unsigned long blocks = batch; unsigned long threadsPerBlock = std::min(stride, getGpu()._threadsPerBlock); kCalculateDotProductDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pDPDelta, p0Vector, pVector, batch, stride, pDelta0, beta0, pDelta, beta, inputStride); LAUNCHERROR("kCalculateDotProductDelta_kernel"); } // Instantiates allowable templated functions so we can hide the implementations here // instead of in the header file because we're mixing CUDA and C++ and that's // a migraine headache in the making otherwise. 
#define EXPLICITLY_INSTANTIATE_KERNELS(T) \ template void kCalculateL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedL1OutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateIndexedCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \ template void kCalculateScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateIndexedScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \ template void kCalculateOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*, NNFloat, NNFloat, NNFloat); \ template void kCalculateHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, T*); \ template void kCalculateIndexedHingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, T*); \ template void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template void kCalculateIndexedSparseDataScaledMarginalCrossEntropyOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, T*, bool); \ template void kCalculateSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedSparseAnalogOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ template void kCalculateSparseAnalogL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ template void kCalculateIndexedSparseAnalogL2HingeOutputDelta<T>(Activation, uint32_t, uint32_t, uint32_t, NNFloat*, NNFloat*, uint32_t*, uint64_t*, uint64_t*, uint32_t*, NNFloat*, T*, bool, NNFloat, NNFloat, NNFloat); \ /**/ EXPLICITLY_INSTANTIATE_KERNELS(NNFloat) EXPLICITLY_INSTANTIATE_KERNELS(double) EXPLICITLY_INSTANTIATE_KERNELS(unsigned char) EXPLICITLY_INSTANTIATE_KERNELS(char) EXPLICITLY_INSTANTIATE_KERNELS(uint32_t) EXPLICITLY_INSTANTIATE_KERNELS(uint64_t) EXPLICITLY_INSTANTIATE_KERNELS(int32_t) EXPLICITLY_INSTANTIATE_KERNELS(int64_t)
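Every L1 output-delta kernel above follows the same pattern: "raw" entries use the zero-target derivative, while the sparse non-zero entries use target 1, so the delta is sgn(a - 1) scaled by the activation derivative and the optional per-example sparse weight (the indexed ELU variant's sgn(a > 1.0) looks like a slip for the sgn(a - 1.0) its siblings use). The host-side reference below is an illustrative sketch, not part of the original file; it assumes NNFloat is float, folds cData._deltaBoost_one into the weight, and covers a single example's sigmoid case for spot-checking kernel output.

// Host-side reference for the sparse non-zero L1 sigmoid delta (illustrative sketch,
// not from the original source; assumes NNFloat == float and one example row).
// d|a - t|/da with t = 1 gives sgn(a - 1); a * (1 - a) is the sigmoid derivative.
#include <cstdint>
#include <vector>

static float sgn_ref(float x) { return (x > 0.0f) ? 1.0f : ((x < 0.0f) ? -1.0f : 0.0f); }

void sparseL1SigmoidDeltaRef(const std::vector<float>& unit,          // activations of one example
                             const std::vector<uint32_t>& sparseIdx,  // indices whose target value is 1
                             float sparseWeight,                      // deltaBoost * pSparseWeight entry, or 1.0f
                             std::vector<float>& delta)               // pre-filled with the "raw" zero-target delta
{
    for (uint32_t idx : sparseIdx) {
        float a = unit[idx];
        delta[idx] = sparseWeight * sgn_ref(a - 1.0f) * a * (1.0f - a);
    }
}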
b632aeb9686a2d04e48c078cb55d7b662cdfa12e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "holder_cleaner.h" #include "holder_cleaner_kernels.h" using namespace MEM::MISC; PlanetHolderCleaner::PlanetHolderCleaner( PhxPlanetFactory *f, FilteringPolicy p ) : fact( f ) , planetsInUse( 1 ) , needChecking( false ) , filteringPolicy( p ) { } PlanetHolderCleaner::~PlanetHolderCleaner() { } void PlanetHolderCleaner::work() { if( !needChecking ) { return; } needChecking = false; createFilter(); if( filteringNeeded() ) { filterHolder(); } } void PlanetHolderCleaner::forceFilter() { if( 0 == fact->size() ) return; createFilter(); filterHolder(); needChecking = false; } void PlanetHolderCleaner::notifyCheckNeeded() { needChecking = true; } void PlanetHolderCleaner::setFilteringPolicy( FilteringPolicy p ) { filteringPolicy = p; } void PlanetHolderCleaner::createFilter() { unsigned threads = fact->size(); dim3 block( min( 512, threads ) ); dim3 grid( 1 + ( threads - 1 ) / block.x ); filter.resize(threads); hipLaunchKernelGGL(( create_filter), dim3(grid), dim3(block), 0, 0, fact->getMasses().d_data(), filter.d_data(), threads ); } bool PlanetHolderCleaner::filteringNeeded() { if( Always == filteringPolicy ) return true; if( Never == filteringPolicy ) return false; unsigned threads = fact->size(); //TODO block.x and the reduction template argument must match - some kind of switch would help - ideally the reduction should be wrapped up nicely dim3 block( 512 );//min( 512, threads ) ); dim3 grid( 1 ); hipLaunchKernelGGL(( reduceFull<unsigned, 512>), dim3(grid), dim3(block), 0, 0, filter.d_data(), planetsInUse.d_data(), threads ); //log_printf( INFO, "%u of %u planets in use.\n", planetsInUse.retrieve(), threads ); if( Frequently == filteringPolicy ) return planetsInUse.retrieve() + 20 < threads; ASSERT( Rarely == filteringPolicy ); return planetsInUse.retrieve() + 20 < 0.8 * threads; // magic numbers! } void PlanetHolderCleaner::filterHolder() { fact->filter( &filter ); }
b632aeb9686a2d04e48c078cb55d7b662cdfa12e.cu
#include "holder_cleaner.h" #include "holder_cleaner_kernels.h" using namespace MEM::MISC; PlanetHolderCleaner::PlanetHolderCleaner( PhxPlanetFactory *f, FilteringPolicy p ) : fact( f ) , planetsInUse( 1 ) , needChecking( false ) , filteringPolicy( p ) { } PlanetHolderCleaner::~PlanetHolderCleaner() { } void PlanetHolderCleaner::work() { if( !needChecking ) { return; } needChecking = false; createFilter(); if( filteringNeeded() ) { filterHolder(); } } void PlanetHolderCleaner::forceFilter() { if( 0 == fact->size() ) return; createFilter(); filterHolder(); needChecking = false; } void PlanetHolderCleaner::notifyCheckNeeded() { needChecking = true; } void PlanetHolderCleaner::setFilteringPolicy( FilteringPolicy p ) { filteringPolicy = p; } void PlanetHolderCleaner::createFilter() { unsigned threads = fact->size(); dim3 block( min( 512, threads ) ); dim3 grid( 1 + ( threads - 1 ) / block.x ); filter.resize(threads); create_filter<<<grid, block>>>( fact->getMasses().d_data(), filter.d_data(), threads ); } bool PlanetHolderCleaner::filteringNeeded() { if( Always == filteringPolicy ) return true; if( Never == filteringPolicy ) return false; unsigned threads = fact->size(); //TODO block.x and the reduction template argument must match - some kind of switch would help - ideally the reduction should be wrapped up nicely dim3 block( 512 );//min( 512, threads ) ); dim3 grid( 1 ); reduceFull<unsigned, 512><<<grid, block>>>( filter.d_data(), planetsInUse.d_data(), threads ); //log_printf( INFO, "%u of %u planets in use.\n", planetsInUse.retrieve(), threads ); if( Frequently == filteringPolicy ) return planetsInUse.retrieve() + 20 < threads; ASSERT( Rarely == filteringPolicy ); return planetsInUse.retrieve() + 20 < 0.8 * threads; // magic numbers! } void PlanetHolderCleaner::filterHolder() { fact->filter( &filter ); }
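holder_cleaner_kernels.h is not part of this pair, so the real kernel bodies are unknown; the sketch below is only a plausible shape for create_filter that matches the call site above (planet masses in, per-planet 0/1 flags out). reduceFull<unsigned, 512> is assumed to be a block-wide sum of those flags into planetsInUse.

// Hypothetical sketch of a create_filter kernel matching the launch above
// (the real implementation lives in holder_cleaner_kernels.h and may differ).
// A planet still "in use" keeps mass > 0, so its filter entry is set to 1.
__global__ void create_filter_sketch(const float *masses, unsigned *filter, unsigned count)
{
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < count)
        filter[idx] = (masses[idx] > 0.0f) ? 1u : 0u;
}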
06140afe87d933f1b28d4f005540d79161dee80e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /************************* CudaMat ****************************************** * Copyright (C) 2008-2009 by Rainer Heintzmann * * [email protected] * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; Version 2 of the License. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * *************************************************************************** * Compile with: * Windows: system('"c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars32.bat"') system('nvcc -c cudaArith.cu -ccbin "c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin') Window 64 bit: system('nvcc -c cudaArith.cu -ccbin "c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin" -I"c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\include" ') Linux: * File sudo vi /usr/local/cuda/bin/nvcc.profile * needs the flag -fPIC in the include line system('nvcc -c cudaArith.cu -v -I/usr/local/cuda/include/') */ // To suppress the unused variable argument for ARM targets #pragma diag_suppress 177 #ifndef NAN // should be part of math.h #define NAN (0.0/0.0) #endif #define ACCU_ARRTYPE double // Type of the tempory arrays for reduce operations #define IMUL(a, b) __mul24(a, b) //#define BLOCKSIZE 512 //#define BLOCKSIZE 512 // below is blocksize for temporary array for reduce operations. Has to be a power of 2 in size #ifndef CUIMAGE_REDUCE_THREADS // this can be defined at compile time via the flag NVCCFLAG='-D CUIMAGE_REDUCE_THREADS=512' #define CUIMAGE_REDUCE_THREADS 512 #endif // (prop.maxThreadsPerBlock) // #define CUIMAGE_REDUCE_THREADS 512 // #define CUIMAGE_REDUCE_THREADS 128 //#define CUIMAGE_REDUCE_BLOCKS 64 #define NBLOCKS(N,blockSize) (N/blockSize+(N%blockSize==0?0:1)) #define NBLOCKSL(N,blockSize) 1 // min((N/blockSize+(N%blockSize==0?0:1)),prop.maxGridSize[0]) __global__ void array_copy(float*a, float * c, size_t mx, size_t my, size_t mz, size_t sx,size_t sy,size_t sz, size_t ox, size_t oy, size_t oz) // copies between two memories with different strides { size_t pnum=((blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x); size_t px=pnum%(sx/2); // my x pos of a complex number in the subarray size_t py=pnum/(sx/2); // my y pos of a complex number if(px>=sx || py >= (sy/2)) return; // not in range ... quit size_t ids=2*(px+py*sx); /// offset to array start in floats size_t idd=2*((ox+px)+(oy+py)*sx); // echange two values using a tmp float tmpR = c[idd]; float tmpI = c[idd+1]; c[idd]=a[ids]; // (float)(ox+px); // c[idd+1]=a[ids+1]; // (float)(oy+py); // a[ids]=tmpR; a[ids+1]=tmpI; }
06140afe87d933f1b28d4f005540d79161dee80e.cu
#include "includes.h" /************************* CudaMat ****************************************** * Copyright (C) 2008-2009 by Rainer Heintzmann * * [email protected] * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; Version 2 of the License. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * *************************************************************************** * Compile with: * Windows: system('"c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars32.bat"') system('nvcc -c cudaArith.cu -ccbin "c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin') Window 64 bit: system('nvcc -c cudaArith.cu -ccbin "c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin" -I"c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\include" ') Linux: * File sudo vi /usr/local/cuda/bin/nvcc.profile * needs the flag -fPIC in the include line system('nvcc -c cudaArith.cu -v -I/usr/local/cuda/include/') */ // To suppress the unused variable argument for ARM targets #pragma diag_suppress 177 #ifndef NAN // should be part of math.h #define NAN (0.0/0.0) #endif #define ACCU_ARRTYPE double // Type of the tempory arrays for reduce operations #define IMUL(a, b) __mul24(a, b) //#define BLOCKSIZE 512 //#define BLOCKSIZE 512 // below is blocksize for temporary array for reduce operations. Has to be a power of 2 in size #ifndef CUIMAGE_REDUCE_THREADS // this can be defined at compile time via the flag NVCCFLAG='-D CUIMAGE_REDUCE_THREADS=512' #define CUIMAGE_REDUCE_THREADS 512 #endif // (prop.maxThreadsPerBlock) // #define CUIMAGE_REDUCE_THREADS 512 // #define CUIMAGE_REDUCE_THREADS 128 //#define CUIMAGE_REDUCE_BLOCKS 64 #define NBLOCKS(N,blockSize) (N/blockSize+(N%blockSize==0?0:1)) #define NBLOCKSL(N,blockSize) 1 // min((N/blockSize+(N%blockSize==0?0:1)),prop.maxGridSize[0]) __global__ void array_copy(float*a, float * c, size_t mx, size_t my, size_t mz, size_t sx,size_t sy,size_t sz, size_t ox, size_t oy, size_t oz) // copies between two memories with different strides { size_t pnum=((blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x); size_t px=pnum%(sx/2); // my x pos of a complex number in the subarray size_t py=pnum/(sx/2); // my y pos of a complex number if(px>=sx || py >= (sy/2)) return; // not in range ... quit size_t ids=2*(px+py*sx); /// offset to array start in floats size_t idd=2*((ox+px)+(oy+py)*sx); // echange two values using a tmp float tmpR = c[idd]; float tmpI = c[idd+1]; c[idd]=a[ids]; // (float)(ox+px); // c[idd+1]=a[ids+1]; // (float)(oy+py); // a[ids]=tmpR; a[ids+1]=tmpI; }
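The file shows the array_copy kernel but not how it is launched; the wrapper below is an illustrative sketch (the name and block size are not from CudaMat) that drives it with the NBLOCKS macro defined above, one thread per complex element of the (sx/2) by (sy/2) sub-block covered by the kernel's range check.

// Illustrative host wrapper for array_copy (not part of the original file).
// One thread handles one complex (2-float) element; NBLOCKS is the macro defined above.
static void launch_array_copy_sketch(float *a, float *c,
                                     size_t mx, size_t my, size_t mz,
                                     size_t sx, size_t sy, size_t sz,
                                     size_t ox, size_t oy, size_t oz)
{
    size_t numComplex = (sx / 2) * (sy / 2);   // matches the px/py range check in the kernel
    dim3 block(512);
    dim3 grid((unsigned)NBLOCKS(numComplex, block.x));
    array_copy<<<grid, block>>>(a, c, mx, my, mz, sx, sy, sz, ox, oy, oz);
}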
873136b310afe4ea0ec69d6b6c6f282767311a4b.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/mish_impl.cuh" #include "include/hip/hip_fp16.h" template <typename T> __global__ void MishKernel(const size_t size, const T *input_addr, T *output_addr) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_addr[pos] = input_addr[pos] * tanh(logf(1. + expf(input_addr[pos]))); } } template <> __global__ void MishKernel(const size_t size, const half *input_addr, half *output_addr) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_addr[pos] = __half2float(input_addr[pos]) * tanh(logf(1. + exp(__half2float(input_addr[pos])))); } } template <> __global__ void MishKernel(const size_t size, const double *input_addr, double *output_addr) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_addr[pos] = input_addr[pos] * tanh(logf(1. + exp(input_addr[pos]))); } } template <typename T> void Mish(const size_t size, const T *input_addr, T *output_addr, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( MishKernel), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, input_addr, output_addr); } template <> void Mish(const size_t size, const half *input_addr, half *output_addr, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( MishKernel<half>), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, input_addr, output_addr); } template <> void Mish(const size_t size, const double *input_addr, double *output_addr, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( MishKernel<double>), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, input_addr, output_addr); } template CUDA_LIB_EXPORT void Mish<float>(const size_t size, const float *input_addr, float *output_addr, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Mish<half>(const size_t size, const half *input_addr, half *output_addr, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Mish<double>(const size_t size, const double *input_addr, double *output_addr, const uint32_t &device_id, hipStream_t cuda_stream);
873136b310afe4ea0ec69d6b6c6f282767311a4b.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/mish_impl.cuh" #include "include/cuda_fp16.h" template <typename T> __global__ void MishKernel(const size_t size, const T *input_addr, T *output_addr) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_addr[pos] = input_addr[pos] * tanh(logf(1. + expf(input_addr[pos]))); } } template <> __global__ void MishKernel(const size_t size, const half *input_addr, half *output_addr) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_addr[pos] = __half2float(input_addr[pos]) * tanh(logf(1. + exp(__half2float(input_addr[pos])))); } } template <> __global__ void MishKernel(const size_t size, const double *input_addr, double *output_addr) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output_addr[pos] = input_addr[pos] * tanh(logf(1. + exp(input_addr[pos]))); } } template <typename T> void Mish(const size_t size, const T *input_addr, T *output_addr, const uint32_t &device_id, cudaStream_t cuda_stream) { MishKernel<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, input_addr, output_addr); } template <> void Mish(const size_t size, const half *input_addr, half *output_addr, const uint32_t &device_id, cudaStream_t cuda_stream) { MishKernel<half><<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, input_addr, output_addr); } template <> void Mish(const size_t size, const double *input_addr, double *output_addr, const uint32_t &device_id, cudaStream_t cuda_stream) { MishKernel<double><<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, input_addr, output_addr); } template CUDA_LIB_EXPORT void Mish<float>(const size_t size, const float *input_addr, float *output_addr, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Mish<half>(const size_t size, const half *input_addr, half *output_addr, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Mish<double>(const size_t size, const double *input_addr, double *output_addr, const uint32_t &device_id, cudaStream_t cuda_stream);
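Mish(x) = x * tanh(softplus(x)) with softplus(x) = ln(1 + e^x), which is exactly what the kernels compute element-wise. The host reference below is a small sketch (float only, not part of the original file) for spot-checking GPU output; std::log1p(std::exp(x)) also keeps more precision than log(1 + exp(x)) for strongly negative x, whereas the kernels use the direct form.

// Host reference for Mish (illustrative sketch, float only; not part of the original file).
#include <cmath>

static float mish_ref(float x)
{
    float sp = std::log1p(std::exp(x));   // softplus(x) = ln(1 + e^x)
    return x * std::tanh(sp);
}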
ec2fe8fde341d778c83caa79df91d3223c0b8d3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mvs.cuh" #include "kernel_helpers_hip.cuh" #include "random_gen.cuh" #include "reduce_hip.cuh" #include <library/cpp/cuda/wrappers/arch.cuh> #include <contrib/libs/cub/hipcub/hipcub.hpp> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> #include <contrib/libs/cub/cub/block/block_scan.cuh> namespace NKernel { __forceinline__ __device__ float GetSingleProbability( float derivativeAbsoluteValue, float threshold ) { return (derivativeAbsoluteValue > threshold) ? 1.0f : __fdividef(derivativeAbsoluteValue, threshold); } template <int BLOCK_THREADS, int ITEMS_PER_THREAD> __device__ __forceinline__ void GetThreshold( float takenFraction, float (&candidates)[ITEMS_PER_THREAD], float (&prefixSum)[ITEMS_PER_THREAD], ui32 size, float* threshold ) { const ui32 thisBlockSize = min(BLOCK_THREADS * ITEMS_PER_THREAD, size); const float sampleSize = thisBlockSize * takenFraction; __shared__ ui32 argMinBorder[BLOCK_THREADS]; __shared__ float minBorder[BLOCK_THREADS]; argMinBorder[threadIdx.x] = 0; minBorder[threadIdx.x] = thisBlockSize; __shared__ bool exit; if (ITEMS_PER_THREAD * threadIdx.x <= thisBlockSize - 1 && ITEMS_PER_THREAD * (threadIdx.x + 1) > thisBlockSize - 1) { const ui32 localId = thisBlockSize - 1 - threadIdx.x * ITEMS_PER_THREAD; #pragma unroll for (int idx = 0; idx < ITEMS_PER_THREAD; ++idx) { if (idx == localId) { if (candidates[idx] <= prefixSum[idx] / sampleSize) { *threshold = prefixSum[idx] / sampleSize; exit = true; } else { exit = false; } } } } __syncthreads(); if (exit) { return; } #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { // Here cub::BlockRadixsort and hipcub::BlockScan numeration is used const ui32 i = k + ITEMS_PER_THREAD * threadIdx.x; if (i < thisBlockSize) { const float takenSize = prefixSum[k] / candidates[k] + thisBlockSize - i - 1; if (takenSize >= sampleSize) { // takenSize is non-growing function minBorder[threadIdx.x] = takenSize; argMinBorder[threadIdx.x] = i; } } } __syncthreads(); #pragma unroll for (int s = BLOCK_THREADS >> 1; s >= 32; s >>= 1) { if (threadIdx.x < s) { if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) { argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s]; minBorder[threadIdx.x] = minBorder[threadIdx.x + s]; } } __syncthreads(); } if (threadIdx.x < 32) { __syncwarp(); #pragma unroll for (int s = 32 >> 1; s > 0; s >>= 1) { if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) { argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s]; minBorder[threadIdx.x] = minBorder[threadIdx.x + s]; } __syncwarp(); } } __syncthreads(); if ( ITEMS_PER_THREAD * threadIdx.x <= argMinBorder[0] && ITEMS_PER_THREAD * (threadIdx.x + 1) > argMinBorder[0] ) { const int localId = argMinBorder[0] - threadIdx.x * ITEMS_PER_THREAD; const int denom = sampleSize - (thisBlockSize - argMinBorder[0] - 1); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; ++i) { minBorder[i] = prefixSum[i]; } *threshold = minBorder[localId] / (denom); } } template <int ITEMS_PER_THREAD, int BLOCK_THREADS> __device__ __forceinline__ void CalculateThreshold( float takenFraction, const float* candidates, ui32 size, float* threshold ) { const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>; using BlockScan = hipcub::BlockScan<float, BLOCK_THREADS>; __shared__ union { typename BlockRadixSort::TempStorage Sort; typename BlockScan::TempStorage Scan; } tempStorage; // Our current 
block's offset int blockOffset = blockIdx.x * TILE_SIZE; // Per-thread tile items float items[ITEMS_PER_THREAD]; float scanItems[ITEMS_PER_THREAD]; // Load items into a blocked arrangement int idx = blockOffset + threadIdx.x; const float inf = std::numeric_limits<float>::max(); #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { if (idx < size) { items[k] = StreamLoad(candidates + idx); } else { items[k] = inf; } idx += BLOCK_THREADS; } __syncthreads(); BlockRadixSort(tempStorage.Sort).Sort(items, 8); __syncthreads(); BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems); __syncthreads(); GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>( takenFraction, items, scanItems, size - blockOffset, threshold ); } template <int ITEMS_PER_THREAD, int BLOCK_THREADS> __launch_bounds__(BLOCK_THREADS, 1) __global__ void CalculateThresholdImpl( float takenFraction, float* candidates, ui32 size, float* threshold ) { CalculateThreshold<ITEMS_PER_THREAD, BLOCK_THREADS>( takenFraction, candidates, size, threshold + blockIdx.x ); } template <int ITEMS_PER_THREAD, int BLOCK_THREADS> __global__ void MvsBootstrapRadixSortImpl( float takenFraction, float lambda, float* weights, ui32 size, ui64* seeds, ui32 seedSize ) { const int blockOffset = blockIdx.x * BLOCK_THREADS * ITEMS_PER_THREAD; using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>; using BlockScan = hipcub::BlockScan<float, BLOCK_THREADS>; __shared__ union { typename BlockRadixSort::TempStorage Sort; typename BlockScan::TempStorage Scan; } tempStorage; // Per-thread tile items float weights_per_thread[ITEMS_PER_THREAD]; float items[ITEMS_PER_THREAD]; float scanItems[ITEMS_PER_THREAD]; int idx = blockOffset + threadIdx.x; const float inf = sqrtf(std::numeric_limits<float>::max()) - 2 * lambda; #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { if (idx < size) { weights_per_thread[k] = StreamLoad(weights + idx); } else { weights_per_thread[k] = inf; } idx += BLOCK_THREADS; } #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { weights_per_thread[k] = sqrtf( fmaf(weights_per_thread[k], weights_per_thread[k], lambda) ); items[k] = weights_per_thread[k]; } __syncthreads(); BlockRadixSort(tempStorage.Sort).Sort(items, 8); __syncthreads(); BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems); __syncthreads(); __shared__ float threshold; GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>( takenFraction, items, scanItems, size - blockOffset, &threshold ); __syncthreads(); // Set Mvs weights ui32 i = blockIdx.x * blockDim.x + threadIdx.x; seeds += i; ui64 s = seeds[0]; const float eps = std::numeric_limits<float>::epsilon(); #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { const float probability = GetSingleProbability(weights_per_thread[k], threshold); weights_per_thread[k] = (probability > eps && NextUniformF(&s) < probability) ? 
__fdividef(1.0f, probability) : 0.0f; } seeds[0] = s; idx = blockOffset + threadIdx.x; #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { if (idx < size) { weights[idx] = weights_per_thread[k]; } idx += BLOCK_THREADS; } } void MvsBootstrapRadixSort( const float takenFraction, const float lambda, float* weights, ui32 size, ui64* seeds, ui32 seedSize, TCudaStream stream ) { const ui32 blockThreads = 256; const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads; const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD); { hipLaunchKernelGGL(( MvsBootstrapRadixSortImpl<SCAN_ITEMS_PER_THREAD, blockThreads>) , dim3(numBlocks), dim3(blockThreads), 0, stream , takenFraction, lambda, weights, size, seeds, seedSize ); } } void CalculateMvsThreshold( const float takenFraction, float* candidates, ui32 size, float* threshold, TCudaStream stream ) { const ui32 blockThreads = 256; const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads; const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD); { hipLaunchKernelGGL(( CalculateThresholdImpl<SCAN_ITEMS_PER_THREAD, blockThreads>) , dim3(numBlocks), dim3(blockThreads), 0, stream , takenFraction, candidates, size, threshold ); } } }
ec2fe8fde341d778c83caa79df91d3223c0b8d3f.cu
#include "mvs.cuh" #include "kernel_helpers.cuh" #include "random_gen.cuh" #include "reduce.cuh" #include <library/cpp/cuda/wrappers/arch.cuh> #include <contrib/libs/cub/cub/cub.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> #include <contrib/libs/cub/cub/block/block_scan.cuh> namespace NKernel { __forceinline__ __device__ float GetSingleProbability( float derivativeAbsoluteValue, float threshold ) { return (derivativeAbsoluteValue > threshold) ? 1.0f : __fdividef(derivativeAbsoluteValue, threshold); } template <int BLOCK_THREADS, int ITEMS_PER_THREAD> __device__ __forceinline__ void GetThreshold( float takenFraction, float (&candidates)[ITEMS_PER_THREAD], float (&prefixSum)[ITEMS_PER_THREAD], ui32 size, float* threshold ) { const ui32 thisBlockSize = min(BLOCK_THREADS * ITEMS_PER_THREAD, size); const float sampleSize = thisBlockSize * takenFraction; __shared__ ui32 argMinBorder[BLOCK_THREADS]; __shared__ float minBorder[BLOCK_THREADS]; argMinBorder[threadIdx.x] = 0; minBorder[threadIdx.x] = thisBlockSize; __shared__ bool exit; if (ITEMS_PER_THREAD * threadIdx.x <= thisBlockSize - 1 && ITEMS_PER_THREAD * (threadIdx.x + 1) > thisBlockSize - 1) { const ui32 localId = thisBlockSize - 1 - threadIdx.x * ITEMS_PER_THREAD; #pragma unroll for (int idx = 0; idx < ITEMS_PER_THREAD; ++idx) { if (idx == localId) { if (candidates[idx] <= prefixSum[idx] / sampleSize) { *threshold = prefixSum[idx] / sampleSize; exit = true; } else { exit = false; } } } } __syncthreads(); if (exit) { return; } #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { // Here cub::BlockRadixsort and cub::BlockScan numeration is used const ui32 i = k + ITEMS_PER_THREAD * threadIdx.x; if (i < thisBlockSize) { const float takenSize = prefixSum[k] / candidates[k] + thisBlockSize - i - 1; if (takenSize >= sampleSize) { // takenSize is non-growing function minBorder[threadIdx.x] = takenSize; argMinBorder[threadIdx.x] = i; } } } __syncthreads(); #pragma unroll for (int s = BLOCK_THREADS >> 1; s >= 32; s >>= 1) { if (threadIdx.x < s) { if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) { argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s]; minBorder[threadIdx.x] = minBorder[threadIdx.x + s]; } } __syncthreads(); } if (threadIdx.x < 32) { __syncwarp(); #pragma unroll for (int s = 32 >> 1; s > 0; s >>= 1) { if (minBorder[threadIdx.x + s] < minBorder[threadIdx.x]) { argMinBorder[threadIdx.x] = argMinBorder[threadIdx.x + s]; minBorder[threadIdx.x] = minBorder[threadIdx.x + s]; } __syncwarp(); } } __syncthreads(); if ( ITEMS_PER_THREAD * threadIdx.x <= argMinBorder[0] && ITEMS_PER_THREAD * (threadIdx.x + 1) > argMinBorder[0] ) { const int localId = argMinBorder[0] - threadIdx.x * ITEMS_PER_THREAD; const int denom = sampleSize - (thisBlockSize - argMinBorder[0] - 1); #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; ++i) { minBorder[i] = prefixSum[i]; } *threshold = minBorder[localId] / (denom); } } template <int ITEMS_PER_THREAD, int BLOCK_THREADS> __device__ __forceinline__ void CalculateThreshold( float takenFraction, const float* candidates, ui32 size, float* threshold ) { const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>; using BlockScan = cub::BlockScan<float, BLOCK_THREADS>; __shared__ union { typename BlockRadixSort::TempStorage Sort; typename BlockScan::TempStorage Scan; } tempStorage; // Our current block's offset int blockOffset = blockIdx.x * TILE_SIZE; // Per-thread tile items float 
items[ITEMS_PER_THREAD]; float scanItems[ITEMS_PER_THREAD]; // Load items into a blocked arrangement int idx = blockOffset + threadIdx.x; const float inf = std::numeric_limits<float>::max(); #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { if (idx < size) { items[k] = StreamLoad(candidates + idx); } else { items[k] = inf; } idx += BLOCK_THREADS; } __syncthreads(); BlockRadixSort(tempStorage.Sort).Sort(items, 8); __syncthreads(); BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems); __syncthreads(); GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>( takenFraction, items, scanItems, size - blockOffset, threshold ); } template <int ITEMS_PER_THREAD, int BLOCK_THREADS> __launch_bounds__(BLOCK_THREADS, 1) __global__ void CalculateThresholdImpl( float takenFraction, float* candidates, ui32 size, float* threshold ) { CalculateThreshold<ITEMS_PER_THREAD, BLOCK_THREADS>( takenFraction, candidates, size, threshold + blockIdx.x ); } template <int ITEMS_PER_THREAD, int BLOCK_THREADS> __global__ void MvsBootstrapRadixSortImpl( float takenFraction, float lambda, float* weights, ui32 size, ui64* seeds, ui32 seedSize ) { const int blockOffset = blockIdx.x * BLOCK_THREADS * ITEMS_PER_THREAD; using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_THREADS, ITEMS_PER_THREAD>; using BlockScan = cub::BlockScan<float, BLOCK_THREADS>; __shared__ union { typename BlockRadixSort::TempStorage Sort; typename BlockScan::TempStorage Scan; } tempStorage; // Per-thread tile items float weights_per_thread[ITEMS_PER_THREAD]; float items[ITEMS_PER_THREAD]; float scanItems[ITEMS_PER_THREAD]; int idx = blockOffset + threadIdx.x; const float inf = sqrtf(std::numeric_limits<float>::max()) - 2 * lambda; #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { if (idx < size) { weights_per_thread[k] = StreamLoad(weights + idx); } else { weights_per_thread[k] = inf; } idx += BLOCK_THREADS; } #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { weights_per_thread[k] = sqrtf( fmaf(weights_per_thread[k], weights_per_thread[k], lambda) ); items[k] = weights_per_thread[k]; } __syncthreads(); BlockRadixSort(tempStorage.Sort).Sort(items, 8); __syncthreads(); BlockScan(tempStorage.Scan).InclusiveSum(items, scanItems); __syncthreads(); __shared__ float threshold; GetThreshold<BLOCK_THREADS, ITEMS_PER_THREAD>( takenFraction, items, scanItems, size - blockOffset, &threshold ); __syncthreads(); // Set Mvs weights ui32 i = blockIdx.x * blockDim.x + threadIdx.x; seeds += i; ui64 s = seeds[0]; const float eps = std::numeric_limits<float>::epsilon(); #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { const float probability = GetSingleProbability(weights_per_thread[k], threshold); weights_per_thread[k] = (probability > eps && NextUniformF(&s) < probability) ? 
__fdividef(1.0f, probability) : 0.0f; } seeds[0] = s; idx = blockOffset + threadIdx.x; #pragma unroll for (int k = 0; k < ITEMS_PER_THREAD; k++) { if (idx < size) { weights[idx] = weights_per_thread[k]; } idx += BLOCK_THREADS; } } void MvsBootstrapRadixSort( const float takenFraction, const float lambda, float* weights, ui32 size, ui64* seeds, ui32 seedSize, TCudaStream stream ) { const ui32 blockThreads = 256; const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads; const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD); { MvsBootstrapRadixSortImpl<SCAN_ITEMS_PER_THREAD, blockThreads> <<< numBlocks, blockThreads, 0, stream >>> ( takenFraction, lambda, weights, size, seeds, seedSize ); } } void CalculateMvsThreshold( const float takenFraction, float* candidates, ui32 size, float* threshold, TCudaStream stream ) { const ui32 blockThreads = 256; const ui32 SCAN_ITEMS_PER_THREAD = 8192 / blockThreads; const ui32 numBlocks = CeilDivide(size, blockThreads * SCAN_ITEMS_PER_THREAD); { CalculateThresholdImpl<SCAN_ITEMS_PER_THREAD, blockThreads> <<< numBlocks, blockThreads, 0, stream >>> ( takenFraction, candidates, size, threshold ); } } }
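GetThreshold in both copies searches a sorted, prefix-summed block for the threshold mu such that keeping every value above mu outright and sampling the rest with probability v/mu gives an expected sample size of n * takenFraction; GetSingleProbability then returns min(1, v/mu). The host sketch below mirrors that search on a single sorted array and is illustrative only (the names and the float denominator are not from the CatBoost source).

// Host sketch of the MVS threshold search (illustrative; assumes v is sorted ascending
// and prefix[i] = v[0] + ... + v[i], matching the BlockRadixSort + InclusiveSum above).
#include <cstddef>
#include <vector>

static float mvs_threshold_sketch(const std::vector<float>& v,
                                  const std::vector<float>& prefix,
                                  float takenFraction)
{
    const size_t n = v.size();
    if (n == 0) return 0.0f;
    const float sampleSize = n * takenFraction;
    // If even the largest value is below the uniform threshold, no element gets probability 1.
    if (v[n - 1] <= prefix[n - 1] / sampleSize)
        return prefix[n - 1] / sampleSize;
    // Otherwise find the largest split index i where taking the top (n - i - 1) values
    // outright and sampling the remainder proportionally still meets the budget.
    for (size_t i = n; i-- > 0;) {
        const float takenSize = prefix[i] / v[i] + (float)(n - i - 1);
        if (takenSize >= sampleSize)
            return prefix[i] / (sampleSize - (float)(n - i - 1));
    }
    return prefix[n - 1] / sampleSize;   // not reached for valid input; keeps the function total
}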
ba9c0c8684acc06c0d79fac3c88a035d4eb38e53.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #define STB_IMAGE_IMPLEMENTATION #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image.h" #include "stb_image_write.h" #include <hip/hip_runtime.h> #include <cstdlib> #include <time.h> #include <math.h> #include <iostream> #include "hip/hip_runtime.h" #define FILTRE_SIZE 3 #define BLOCK_HEIGHT 32 #define BLOCK_WIDTH 32 #define SHARE_SIZE_HEIGHT (BLOCK_HEIGHT + FILTRE_SIZE -1) #define SHARE_SIZE_WIDTH (BLOCK_WIDTH + FILTRE_SIZE -1) #define TILE_WIDTH 32 #define MASKCOLS 3 #define MASKROWS 3 __global__ void ShareKernelProcessing(unsigned char* InputImageData, const float *kernel, unsigned char* outputImageData, int channels, int width, int height){ __shared__ float N_ds[SHARE_SIZE_HEIGHT][SHARE_SIZE_WIDTH]; //block of image in shared memory // allocation in shared memory of image blocks int maskr = MASKROWS/2; int dest = threadIdx.y * TILE_WIDTH + threadIdx.x; int destY = dest/SHARE_SIZE_HEIGHT ; //row of shared memory int destX = dest%SHARE_SIZE_WIDTH; //col of shared memory int srcY = blockIdx.y *TILE_WIDTH + destY - maskr; // index to fetch data from input image int srcX = blockIdx.x *TILE_WIDTH + destX - maskr; // index to fetch data from input image int src = (srcY *width +srcX) * channels + k; // index of input image if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width){ N_ds[destY][destX] = InputImageData[src]; // copy element of image in shared memory } else N_ds[destY][destX] = 0; dest = threadIdx.y * TILE_WIDTH+ threadIdx.x + TILE_WIDTH * TILE_WIDTH; destY = dest/SHARE_SIZE_HEIGHT; destX = dest%SHARE_SIZE_WIDTH; srcY = blockIdx.y *TILE_WIDTH + destY - maskr; srcX = blockIdx.x *TILE_WIDTH + destX - maskr; src = (srcY *width +srcX) * channels + k; if(destY < SHARE_SIZE_HEIGHT){ if(srcY>= 0 && srcY < height && srcX>=0 && srcX < width) N_ds[destY][destX] = InputImageData[src]; else{ N_ds[destY][destX] = 0; } __syncthreads(); //compute kernel convolution float accum = 0; int y, x; for (y= 0; y < MASKCOLS; y++) for(x = 0; x<MASKROWS; x++) { accum += N_ds[threadIdx.y + y][threadIdx.x + x] *kernel[y * MASKCOLS + x]; y = blockIdx.y * TILE_WIDTH + threadIdx.y; x = blockIdx.x * TILE_WIDTH + threadIdx.x; if(y < height && x < width) outputImageData[(y * width + x) ] = accum; __syncthreads(); } } void cuda_error(hipError_t err,const char *file,int line) { //cude check errors if (err != hipSuccess){ printf("%s in %s at line %d\n" , hipGetErrorString(err), file, line); exit(EXIT_FAILURE); } } #define cuda_error_check(err) (cuda_error( err, __FILE__, __LINE__ )) int main(int argc, char** argv) { //declarations int width = 0, height = 0, nchannels = 0; int num_iteration = 1000; float executionTime ; int const desired_channels = 1; // request to convert image to gray char const * const filename1 = argv[1]; char const * const filename2 = "sortie.jpg"; // Load the image unsigned char* data_in = stbi_load(filename1, &width, &height, &nchannels, desired_channels); // check for errors if (!data_in || !width || !height || !nchannels){ printf("Error loading image %s", filename1); return -1; } // the filter mask float mask[FILTRE_SIZE*FILTRE_SIZE] = { -1, -1, -1, -1, 8, -1, -1, -1, -1}; // Memory allocation GPU unsigned char *gpu_data_in, *gpu_data_out; unsigned char*data_out = (unsigned char*)malloc(width * height * desired_channels); float * gpu_mask; cuda_error_check(hipMalloc (( void **)&gpu_data_in, width * height * desired_channels*sizeof(unsigned char))); cuda_error_check(hipMalloc (( void 
**)&gpu_data_out, width * height * desired_channels*sizeof(unsigned char))); cuda_error_check(hipMalloc (( void **)&gpu_mask, FILTRE_SIZE*FILTRE_SIZE*sizeof(float))); // data copy host to device cuda_error_check(hipMemcpy (gpu_data_in, data_in, width * height * desired_channels*sizeof(unsigned char) , hipMemcpyHostToDevice)); cuda_error_check(hipMemcpy (gpu_mask, mask , FILTRE_SIZE*FILTRE_SIZE*sizeof(float), hipMemcpyHostToDevice)); // Set up the grid and block dimensions for the executions const unsigned int block_col = 32; const unsigned int block_row = 32; // creat cuda event to calculate time execution [start,stop] hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // grid dimension dim3 grid(height/block_col, width/ block_row, 1); // block dimension dim3 threadBlock(block_col, block_row, 1); // start recording hipEventRecord(start, 0); for(int i=0; i < num_iteration; i++){ // karnel call hipLaunchKernelGGL(( ShareKernelProcessing), dim3(grid), dim3(threadBlock) , 0, 0, gpu_data_in,gpu_mask, gpu_data_out,desired_channels,height, width); } hipEventRecord(stop, 0); // stop recording hipEventSynchronize(stop); hipEventElapsedTime(&executionTime, start, stop); // data copy device to host cuda_error_check(hipMemcpy (data_out, gpu_data_out, width * height * desired_channels, hipMemcpyDeviceToHost)); printf("Execution Time 1000 images : %f ms \n", executionTime); printf("Execution Time image : %f ms \n", executionTime/1000); //write the image if(!stbi_write_jpg(filename2, height, width, 1, data_out, height)) { printf("Error saving image %s \n", filename2); return (-1); } free(data_in); free(data_out); hipFree(gpu_data_in); hipFree(gpu_data_out); hipFree(gpu_mask); }
ba9c0c8684acc06c0d79fac3c88a035d4eb38e53.cu
#include <stdio.h>
#include <stdlib.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image.h"
#include "stb_image_write.h"
#include <cuda.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include "cuda_runtime.h"

#define FILTRE_SIZE 3
#define BLOCK_HEIGHT 32
#define BLOCK_WIDTH 32
#define SHARE_SIZE_HEIGHT (BLOCK_HEIGHT + FILTRE_SIZE - 1)
#define SHARE_SIZE_WIDTH (BLOCK_WIDTH + FILTRE_SIZE - 1)
#define TILE_WIDTH 32
#define MASKCOLS 3
#define MASKROWS 3

// Tiled convolution: each block stages a (TILE_WIDTH + mask - 1)^2 halo tile of the
// input image in shared memory, then each thread computes one output pixel per channel.
__global__ void ShareKernelProcessing(unsigned char* InputImageData, const float* kernel,
                                      unsigned char* outputImageData, int channels,
                                      int width, int height)
{
    __shared__ float N_ds[SHARE_SIZE_HEIGHT][SHARE_SIZE_WIDTH]; // block of image in shared memory

    int maskr = MASKROWS / 2; // mask radius

    for (int k = 0; k < channels; k++) {
        // first loading phase: fill the first TILE_WIDTH*TILE_WIDTH entries of the tile
        int dest  = threadIdx.y * TILE_WIDTH + threadIdx.x;
        int destY = dest / SHARE_SIZE_WIDTH;                  // row of shared memory
        int destX = dest % SHARE_SIZE_WIDTH;                  // col of shared memory
        int srcY  = blockIdx.y * TILE_WIDTH + destY - maskr;  // row to fetch from the input image
        int srcX  = blockIdx.x * TILE_WIDTH + destX - maskr;  // col to fetch from the input image
        int src   = (srcY * width + srcX) * channels + k;     // flat index into the input image

        if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
            N_ds[destY][destX] = InputImageData[src];         // copy image element into shared memory
        else
            N_ds[destY][destX] = 0;                           // zero-pad outside the image

        // second loading phase: the same threads fill the remaining halo entries
        dest  = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
        destY = dest / SHARE_SIZE_WIDTH;
        destX = dest % SHARE_SIZE_WIDTH;
        srcY  = blockIdx.y * TILE_WIDTH + destY - maskr;
        srcX  = blockIdx.x * TILE_WIDTH + destX - maskr;
        src   = (srcY * width + srcX) * channels + k;

        if (destY < SHARE_SIZE_HEIGHT) {
            if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
                N_ds[destY][destX] = InputImageData[src];
            else
                N_ds[destY][destX] = 0;
        }
        __syncthreads();

        // compute the kernel convolution for this thread's output pixel
        float accum = 0;
        for (int y = 0; y < MASKROWS; y++)
            for (int x = 0; x < MASKCOLS; x++)
                accum += N_ds[threadIdx.y + y][threadIdx.x + x] * kernel[y * MASKCOLS + x];

        int outY = blockIdx.y * TILE_WIDTH + threadIdx.y;
        int outX = blockIdx.x * TILE_WIDTH + threadIdx.x;
        if (outY < height && outX < width) {
            accum = fminf(fmaxf(accum, 0.0f), 255.0f); // clamp to the 8-bit range
            outputImageData[(outY * width + outX) * channels + k] = (unsigned char)accum;
        }
        __syncthreads(); // the tile is reloaded for the next channel
    }
}

// CUDA error check
void cuda_error(cudaError_t err, const char* file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define cuda_error_check(err) (cuda_error(err, __FILE__, __LINE__))

int main(int argc, char** argv)
{
    // declarations
    int width = 0, height = 0, nchannels = 0;
    int num_iteration = 1000;
    float executionTime = 0.0f;
    int const desired_channels = 1; // request to convert the image to gray

    if (argc < 2) {
        printf("usage: %s <input image>\n", argv[0]);
        return -1;
    }
    char const* const filename1 = argv[1];
    char const* const filename2 = "sortie.jpg";

    // load the image
    unsigned char* data_in = stbi_load(filename1, &width, &height, &nchannels, desired_channels);
    if (!data_in || !width || !height || !nchannels) {
        printf("Error loading image %s\n", filename1);
        return -1;
    }

    // the filter mask (Laplacian edge detection)
    float mask[FILTRE_SIZE * FILTRE_SIZE] = { -1, -1, -1,
                                              -1,  8, -1,
                                              -1, -1, -1 };

    // GPU memory allocation
    unsigned char *gpu_data_in, *gpu_data_out;
    unsigned char* data_out = (unsigned char*)malloc(width * height * desired_channels);
    float* gpu_mask;
    cuda_error_check(cudaMalloc((void**)&gpu_data_in,  width * height * desired_channels * sizeof(unsigned char)));
    cuda_error_check(cudaMalloc((void**)&gpu_data_out, width * height * desired_channels * sizeof(unsigned char)));
    cuda_error_check(cudaMalloc((void**)&gpu_mask, FILTRE_SIZE * FILTRE_SIZE * sizeof(float)));

    // data copy host to device
    cuda_error_check(cudaMemcpy(gpu_data_in, data_in, width * height * desired_channels * sizeof(unsigned char), cudaMemcpyHostToDevice));
    cuda_error_check(cudaMemcpy(gpu_mask, mask, FILTRE_SIZE * FILTRE_SIZE * sizeof(float), cudaMemcpyHostToDevice));

    // set up the grid and block dimensions (round up so the whole image is covered)
    const unsigned int block_col = 32;
    const unsigned int block_row = 32;
    dim3 grid((width + block_col - 1) / block_col, (height + block_row - 1) / block_row, 1);
    dim3 threadBlock(block_col, block_row, 1);

    // create CUDA events to measure the execution time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0); // start recording
    for (int i = 0; i < num_iteration; i++) {
        // kernel call (arguments: channels, width, height)
        ShareKernelProcessing<<<grid, threadBlock>>>(gpu_data_in, gpu_mask, gpu_data_out,
                                                     desired_channels, width, height);
    }
    cudaEventRecord(stop, 0); // stop recording
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&executionTime, start, stop);
    cuda_error_check(cudaGetLastError());

    // data copy device to host
    cuda_error_check(cudaMemcpy(data_out, gpu_data_out, width * height * desired_channels, cudaMemcpyDeviceToHost));

    printf("Execution Time %d images : %f ms \n", num_iteration, executionTime);
    printf("Execution Time image : %f ms \n", executionTime / num_iteration);

    // write the image (stbi_write_jpg takes width, height, components, data, quality)
    if (!stbi_write_jpg(filename2, width, height, 1, data_out, 90)) {
        printf("Error saving image %s \n", filename2);
        return -1;
    }

    stbi_image_free(data_in);
    free(data_out);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(gpu_data_in);
    cudaFree(gpu_data_out);
    cudaFree(gpu_mask);
    return 0;
}
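The tiled kernel above is easiest to validate against a plain CPU convolution. The sketch below is not part of the original file; the helper name convolve_cpu_reference is invented here, and it simply applies the same 3x3 mask with zero padding and clamping so its output can be compared element-for-element with data_out.

// Reference CPU convolution for a single-channel image (hypothetical helper,
// only for checking the GPU result of ShareKernelProcessing).
void convolve_cpu_reference(const unsigned char* in, unsigned char* out,
                            const float* mask, int width, int height)
{
    const int r = FILTRE_SIZE / 2; // mask radius (1 for a 3x3 mask)
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            float accum = 0.0f;
            for (int my = 0; my < MASKROWS; my++) {
                for (int mx = 0; mx < MASKCOLS; mx++) {
                    int sy = y + my - r;
                    int sx = x + mx - r;
                    float v = 0.0f; // zero padding outside the image
                    if (sy >= 0 && sy < height && sx >= 0 && sx < width)
                        v = (float)in[sy * width + sx];
                    accum += v * mask[my * MASKCOLS + mx];
                }
            }
            if (accum < 0.0f)   accum = 0.0f;   // clamp to the 8-bit range,
            if (accum > 255.0f) accum = 255.0f; // matching the GPU kernel
            out[y * width + x] = (unsigned char)accum;
        }
    }
}

Running this on data_in and comparing against data_out (e.g. with memcmp) is enough to catch indexing mistakes in the tiled version.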
e21ad0d17e764484db6d97b2cfe7e6b2b32f02c4.hip
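The next file is the hipified QuEST GPU backend. As orientation, the standalone sketch below (not part of the QuEST sources; fillKernel and all values are invented for illustration) shows the launch convention the generated code uses throughout: hipLaunchKernelGGL takes the kernel, grid and block dimensions, dynamic shared-memory bytes and a stream where CUDA's <<<grid, block>>> syntax would appear, followed by the kernel arguments, while runtime calls are renamed one-to-one (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, cudaDeviceSynchronize -> hipDeviceSynchronize).

#include "hip/hip_runtime.h"
#include <cstdio>

// Toy kernel used only to demonstrate the HIP launch syntax.
__global__ void fillKernel(float* out, int n, float value)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

int main()
{
    const int n = 1024;
    float* d_out = nullptr;
    hipMalloc((void**)&d_out, n * sizeof(float));        // cudaMalloc -> hipMalloc

    int threads = 128;
    int blocks  = (n + threads - 1) / threads;
    // CUDA form:  fillKernel<<<blocks, threads>>>(d_out, n, 1.0f);
    hipLaunchKernelGGL(fillKernel, dim3(blocks), dim3(threads), 0, 0, d_out, n, 1.0f);
    hipDeviceSynchronize();                              // cudaDeviceSynchronize -> hipDeviceSynchronize

    float h_first = 0.0f;
    hipMemcpy(&h_first, d_out, sizeof(float), hipMemcpyDeviceToHost); // cudaMemcpy -> hipMemcpy
    printf("first element: %f\n", h_first);
    hipFree(d_out);
    return 0;
}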
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details /** @file * An implementation of the backend in ../QuEST_internal.h for a GPU environment. * * @author Ania Brown * @author Tyson Jones */ # include "QuEST.h" # include "QuEST_precision.h" # include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey # include "mt19937ar.h" # include <stdlib.h> # include <stdio.h> # include <math.h> # define REDUCE_SHARED_SIZE 512 # define DEBUG 1 /* * struct types for concisely passing unitaries to kernels */ // hide these from doxygen /// \cond HIDDEN_SYMBOLS typedef struct ArgMatrix2 { Complex r0c0, r0c1; Complex r1c0, r1c1; } ArgMatrix2; typedef struct ArgMatrix4 { Complex r0c0, r0c1, r0c2, r0c3; Complex r1c0, r1c1, r1c2, r1c3; Complex r2c0, r2c1, r2c2, r2c3; Complex r3c0, r3c1, r3c2, r3c3; } ArgMatrix4; ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) { ArgMatrix2 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; return a; } ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) { ArgMatrix4 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r0c2.real=m.real[0][2]; a.r0c2.imag=m.imag[0][2]; a.r0c3.real=m.real[0][3]; a.r0c3.imag=m.imag[0][3]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; a.r1c2.real=m.real[1][2]; a.r1c2.imag=m.imag[1][2]; a.r1c3.real=m.real[1][3]; a.r1c3.imag=m.imag[1][3]; a.r2c0.real=m.real[2][0]; a.r2c0.imag=m.imag[2][0]; a.r2c1.real=m.real[2][1]; a.r2c1.imag=m.imag[2][1]; a.r2c2.real=m.real[2][2]; a.r2c2.imag=m.imag[2][2]; a.r2c3.real=m.real[2][3]; a.r2c3.imag=m.imag[2][3]; a.r3c0.real=m.real[3][0]; a.r3c0.imag=m.imag[3][0]; a.r3c1.real=m.real[3][1]; a.r3c1.imag=m.imag[3][1]; a.r3c2.real=m.real[3][2]; a.r3c2.imag=m.imag[3][2]; a.r3c3.real=m.real[3][3]; a.r3c3.imag=m.imag[3][3]; return a; } /// \endcond /* * in-kernel bit twiddling functions */ __forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) { return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; } __forceinline__ __device__ int getBitMaskParity(long long int mask) { int parity = 0; while (mask) { parity = !parity; mask = mask & (mask-1); } return parity; } __forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) { return (number ^ (1LL << bitInd)); } __forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) { long long int left, right; left = (number >> index) << index; right = number - left; return (left << 1) ^ right; } __forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) { int small = (bit1 < bit2)? bit1 : bit2; int big = (bit1 < bit2)? bit2 : bit1; return insertZeroBit(insertZeroBit(number, small), big); } __forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) { /* inserted bit inds must strictly increase, so that their final indices are correct. 
* in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already * memory bottle-necked so overhead eats this slowdown), we find the next-smallest index each * at each insert. recall every element of inds (a positive or zero number) is unique. * This function won't appear in the CPU code, which can use C99 variable-size arrays and * ought to make a sorted array before threading */ int curMin = inds[0]; int prevMin = -1; for (int n=0; n < numInds; n++) { // find next min for (int t=0; t < numInds; t++) if (inds[t]>prevMin && inds[t]<curMin) curMin = inds[t]; number = insertZeroBit(number, curMin); // set curMin to an arbitrary non-visited elem prevMin = curMin; for (int t=0; t < numInds; t++) if (inds[t] > curMin) { curMin = inds[t]; break; } } return number; } /* * state vector and density matrix operations */ #ifdef __cplusplus extern "C" { #endif void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { hipDeviceSynchronize(); hipMemcpy( qureg.stateVec.real + startInd, reals, numAmps * sizeof(*(qureg.stateVec.real)), hipMemcpyHostToDevice); hipMemcpy( qureg.stateVec.imag + startInd, imags, numAmps * sizeof(*(qureg.stateVec.imag)), hipMemcpyHostToDevice); } /** works for both statevectors and density matrices */ void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { // copy copyQureg's GPU statevec to targetQureg's GPU statevec hipDeviceSynchronize(); hipMemcpy( targetQureg.stateVec.real, copyQureg.stateVec.real, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.stateVec.real)), hipMemcpyDeviceToDevice); hipMemcpy( targetQureg.stateVec.imag, copyQureg.stateVec.imag, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.stateVec.imag)), hipMemcpyDeviceToDevice); } __global__ void densmatr_initPureStateKernel( long long int numPureAmps, qreal *targetVecReal, qreal *targetVecImag, qreal *copyVecReal, qreal *copyVecImag) { // this is a particular index of the pure copyQureg long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=numPureAmps) return; qreal realRow = copyVecReal[index]; qreal imagRow = copyVecImag[index]; for (long long int col=0; col < numPureAmps; col++) { qreal realCol = copyVecReal[col]; qreal imagCol = - copyVecImag[col]; // minus for conjugation targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; } } void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_initPureStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, copyQureg.numAmpsPerChunk, targetQureg.stateVec.real, targetQureg.stateVec.imag, copyQureg.stateVec.real, copyQureg.stateVec.imag); } __global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = probFactor; stateVecImag[index] = 0.0; } void densmatr_initPlusState(Qureg qureg) { qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, 
qureg.numAmpsPerChunk, probFactor, qureg.stateVec.real, qureg.stateVec.imag); } __global__ void densmatr_initClassicalStateKernel( long long int densityNumElems, qreal *densityReal, qreal *densityImag, long long int densityInd) { // initialise the state to all zeros long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= densityNumElems) return; densityReal[index] = 0.0; densityImag[index] = 0.0; if (index==densityInd){ // classical state has probability 1 densityReal[densityInd] = 1.0; densityImag[densityInd] = 0.0; } } void densmatr_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // index of the desired state in the flat density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int densityInd = (densityDim + 1)*stateInd; // identical to pure version hipLaunchKernelGGL(( densmatr_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag, densityInd); } void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) { printf("statevec_createQureg\n"); // allocate CPU memory long long int numAmps = 1L << numQubits; long long int numAmpsPerRank = numAmps/env.numRanks; //qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); //qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); if (env.numRanks>1){ qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); } // check cpu memory allocation was successful //if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag)) // && numAmpsPerRank ) { // printf("Could not allocate memory!\n"); // exit (EXIT_FAILURE); //} if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag)) && numAmpsPerRank ) { printf("Could not allocate memory!\n"); exit (EXIT_FAILURE); } qureg->numQubitsInStateVec = numQubits; qureg->numAmpsPerChunk = numAmpsPerRank; qureg->numAmpsTotal = numAmps; qureg->chunkId = env.rank; qureg->numChunks = env.numRanks; qureg->isDensityMatrix = 0; // allocate GPU memory hipMallocManaged(&(qureg->stateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.real))); hipMallocManaged(&(qureg->stateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.imag))); hipMallocManaged(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); hipMallocManaged(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* sizeof(qreal)); //qureg->stateVec.real = (qreal*)malloc(qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.real))); //qureg->stateVec.imag = (qreal*)malloc(qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.imag))); // check gpu memory allocation was successful if (!(qureg->stateVec.real) || !(qureg->stateVec.imag)){ printf("Could not allocate memory on GPU!\n"); exit (EXIT_FAILURE); } } void statevec_destroyQureg(Qureg qureg, QuESTEnv env) { // Free CPU memory //free(qureg.stateVec.real); //free(qureg.stateVec.imag); if (env.numRanks>1){ free(qureg.pairStateVec.real); free(qureg.pairStateVec.imag); } // Free GPU memory hipFree(qureg.stateVec.real); hipFree(qureg.stateVec.imag); hipFree(qureg.firstLevelReduction); hipFree(qureg.secondLevelReduction); 
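// the amplitude arrays were allocated with hipMallocManaged in statevec_createQureg,
// so they are released by the hipFree calls above rather than by free()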
//free(qureg.stateVec.real); //free(qureg.stateVec.imag); } DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) { DiagonalOp op; op.numQubits = numQubits; op.numElemsPerChunk = (1LL << numQubits) / env.numRanks; op.chunkId = env.rank; op.numChunks = env.numRanks; // allocate CPU memory (initialised to zero) op.real = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); // @TODO no handling of rank>1 allocation (no distributed GPU) // check cpu memory allocation was successful if ( !op.real || !op.imag ) { printf("Could not allocate memory!\n"); exit(EXIT_FAILURE); } // allocate GPU memory size_t arrSize = op.numElemsPerChunk * sizeof(qreal); hipMalloc(&(op.deviceOperator.real), arrSize); hipMalloc(&(op.deviceOperator.imag), arrSize); // check gpu memory allocation was successful if (!op.deviceOperator.real || !op.deviceOperator.imag) { printf("Could not allocate memory on GPU!\n"); exit(EXIT_FAILURE); } // initialise GPU memory to zero hipMemset(op.deviceOperator.real, 0, arrSize); hipMemset(op.deviceOperator.imag, 0, arrSize); return op; } void agnostic_destroyDiagonalOp(DiagonalOp op) { free(op.real); free(op.imag); hipFree(op.deviceOperator.real); hipFree(op.deviceOperator.imag); } void agnostic_syncDiagonalOp(DiagonalOp op) { size_t arrSize = (1LL << op.numQubits) * sizeof(qreal); hipDeviceSynchronize(); hipMemcpy(op.deviceOperator.real, op.real, arrSize, hipMemcpyHostToDevice); hipMemcpy(op.deviceOperator.imag, op.imag, arrSize, hipMemcpyHostToDevice); } int GPUExists(void){ int deviceCount, device; int gpuDeviceCount = 0; struct hipDeviceProp_t properties; hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount); if (cudaResultCode != hipSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { hipGetDeviceProperties(&properties, device); if (properties.major != 9999) { /* 9999 means emulation only */ ++gpuDeviceCount; } } if (gpuDeviceCount) return 1; else return 0; } QuESTEnv createQuESTEnv(void) { if (!GPUExists()){ printf("Trying to run GPU code with no GPU available\n"); exit(EXIT_FAILURE); } QuESTEnv env; env.rank=0; env.numRanks=1; seedQuESTDefault(); return env; } void syncQuESTEnv(QuESTEnv env){ hipDeviceSynchronize(); } int syncQuESTSuccess(int successCode){ return successCode; } void destroyQuESTEnv(QuESTEnv env){ // MPI finalize goes here in MPI version. 
Call this function anyway for consistency } void reportQuESTEnv(QuESTEnv env){ printf("EXECUTION ENVIRONMENT:\n"); printf("Running locally on one node with GPU\n"); printf("Number of ranks is %d\n", env.numRanks); # ifdef _OPENMP printf("OpenMP enabled\n"); printf("Number of threads available is %d\n", omp_get_max_threads()); # else printf("OpenMP disabled\n"); # endif } void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){ sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec); } void copyStateToGPU(Qureg qureg) { //if (DEBUG) printf("Copying data to GPU\n"); //hipMemcpy(qureg.stateVec.real, qureg.stateVec.real, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.real)), hipMemcpyHostToDevice); //hipMemcpy(qureg.stateVec.imag, qureg.stateVec.imag, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.imag)), hipMemcpyHostToDevice); //if (DEBUG) printf("Finished copying data to GPU\n"); } void copyStateFromGPU(Qureg qureg) { //hipDeviceSynchronize(); //if (DEBUG) printf("Copying data from GPU\n"); //hipMemcpy(qureg.stateVec.real, qureg.stateVec.real, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.real)), hipMemcpyDeviceToHost); //hipMemcpy(qureg.stateVec.imag, qureg.stateVec.imag, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.imag)), hipMemcpyDeviceToHost); //if (DEBUG) printf("Finished copying data from GPU\n"); } /** Print the current state vector of probability amplitudes for a set of qubits to standard out. For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits */ void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ long long int index; int rank; //copyStateFromGPU(qureg); hipDeviceSynchronize(); if (qureg.numQubitsInStateVec<=5){ for (rank=0; rank<qureg.numChunks; rank++){ if (qureg.chunkId==rank){ if (reportRank) { printf("Reporting state from rank %d [\n", qureg.chunkId); //printf("\trank, index, real, imag\n"); printf("real, imag\n"); } else if (rank==0) { printf("Reporting state [\n"); printf("real, imag\n"); } for(index=0; index<qureg.numAmpsPerChunk; index++){ printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]); } if (reportRank || rank==qureg.numChunks-1) printf("]\n"); } syncQuESTEnv(env); } } } qreal statevec_getRealAmp(Qureg qureg, long long int index){ //qreal el=0; //hipMemcpy(&el, &(qureg.stateVec.real[index]), // sizeof(*(qureg.stateVec.real)), hipMemcpyDeviceToHost); //return el; return qureg.stateVec.real[index]; } qreal statevec_getImagAmp(Qureg qureg, long long int index){ //qreal el=0; //hipMemcpy(&el, &(qureg.stateVec.imag[index]), // sizeof(*(qureg.stateVec.imag)), hipMemcpyDeviceToHost); //return el; return qureg.stateVec.imag[index]; } __global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the statevector to be all-zeros index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } void statevec_initBlankState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initBlankStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag); } //__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal 
*stateVecImag){ __global__ void statevec_initZeroStateKernel(Qureg qureg) { long long int index; long long int stateVecSize = qureg.numAmpsPerChunk; // initialise the state to |0000..0000> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qureg.stateVec.real[index] = 0.0; qureg.stateVec.imag[index] = 0.0; if (index==0){ // zero state |0000..0000> has probability 1 qureg.stateVec.real[0] = 1.0; qureg.stateVec.imag[0] = 0.0; } } void statevec_initZeroState(Qureg qureg) { printf("statevec_initZeroState\n"); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); //statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( // qureg.numAmpsPerChunk, // qureg.stateVec.real, // qureg.stateVec.imag); hipLaunchKernelGGL(( statevec_initZeroStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg); hipDeviceSynchronize(); printf("after initZeroState real[0]: %g real[1]: %g\n", qureg.stateVec.real[0], qureg.stateVec.real[1]); } __global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize); stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } void statevec_initPlusState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initPlusStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag); } __global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ long long int index; // initialise the state to |stateInd> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==stateInd){ // classical state has probability 1 stateVecReal[stateInd] = 1.0; stateVecImag[stateInd] = 0.0; } } void statevec_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initClassicalStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag, stateInd); } __global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = (index*2.0)/10.0; stateVecImag[index] = (index*2.0+1.0)/10.0; } void statevec_initDebugState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initDebugStateKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag); } __global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ long long int index; int bit; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); 
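// exactly half of the basis states have the chosen qubit equal to `outcome`,
// hence the 1/sqrt(stateVecSize/2) normalisation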
bit = extractBit(qubitId, index); if (bit==outcome) { stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } else { stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } } void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_initStateOfSingleQubitKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg->numAmpsPerChunk, qureg->stateVec.real, qureg->stateVec.imag, qubitId, outcome); } // returns 1 if successful, else 0 int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ long long int chunkSize, stateVecSize; long long int indexInChunk, totalIndex; chunkSize = qureg->numAmpsPerChunk; stateVecSize = chunkSize*qureg->numChunks; qreal *stateVecReal = qureg->stateVec.real; qreal *stateVecImag = qureg->stateVec.imag; FILE *fp; char line[200]; fp = fopen(filename, "r"); if (fp == NULL) return 0; indexInChunk = 0; totalIndex = 0; while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){ if (line[0]!='#'){ int chunkId = totalIndex/chunkSize; if (chunkId==qureg->chunkId){ # if QuEST_PREC==1 sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==2 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==4 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # endif indexInChunk += 1; } totalIndex += 1; } } fclose(fp); copyStateToGPU(*qureg); // indicate success return 1; } int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ qreal diff; int chunkSize = mq1.numAmpsPerChunk; //copyStateFromGPU(mq1); //copyStateFromGPU(mq2); for (int i=0; i<chunkSize; i++){ diff = mq1.stateVec.real[i] - mq2.stateVec.real[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; } return 1; } __global__ void statevec_compactUnitaryKernel (Qureg qureg, int rotQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << rotQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_compactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, alpha, beta); } __global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } } void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledCompactUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, alpha, beta); } __global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_unitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledMultiQubitUnitaryKernel( Qureg qureg, long long int ctrlMask, int* targs, int numTargs, qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps) { // decide the amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes if (thisTask>=numTasks) return; // find this task's start index (where all targs are 0) long long int ind00 = insertZeroBits(thisTask, targs, numTargs); // this task only modifies amplitudes if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; qreal *reVec = qureg.stateVec.real; qreal *imVec = qureg.stateVec.imag; /* each thread needs: long long int ampInds[numAmps]; qreal reAmps[numAmps]; qreal imAmps[numAmps]; but instead has access to shared arrays, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine the indices and record values of target amps long long int ind; for (int i=0; i < numTargAmps; i++) { // get global index of current target qubit assignment ind = ind00; for (int t=0; t < numTargs; t++) if (extractBit(t, i)) ind = flipBit(ind, targs[t]); ampInds[i*stride+offset] = ind; reAmps [i*stride+offset] = reVec[ind]; imAmps [i*stride+offset] = imVec[ind]; } // update the amplitudes for (int r=0; r < numTargAmps; r++) { ind = ampInds[r*stride+offset]; reVec[ind] = 0; imVec[ind] = 0; for (int c=0; c < numTargAmps; c++) { qreal uReElem = uRe[c + r*numTargAmps]; qreal uImElem = uIm[c + r*numTargAmps]; reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem; imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem; } } } void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u) { int threadsPerCUDABlock = 128; 
int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock); // allocate device space for global {targs} (length: numTargs) and populate int *d_targs; size_t targMemSize = numTargs * sizeof *d_targs; hipMallocManaged(&d_targs, targMemSize); hipMemcpy(d_targs, targs, targMemSize, hipMemcpyHostToDevice); // flatten out the u.real and u.imag lists int uNumRows = (1 << u.numQubits); qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat); qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat); long long int i = 0; for (int r=0; r < uNumRows; r++) for (int c=0; c < uNumRows; c++) { uReFlat[i] = u.real[r][c]; uImFlat[i] = u.imag[r][c]; i++; } // allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate qreal* d_uRe; qreal* d_uIm; size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm hipMallocManaged(&d_uRe, uMemSize); hipMallocManaged(&d_uIm, uMemSize); hipMemcpy(d_uRe, uReFlat, uMemSize, hipMemcpyHostToDevice); hipMemcpy(d_uIm, uImFlat, uMemSize, hipMemcpyHostToDevice); // allocate device Wspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs) long long int *d_ampInds; qreal *d_reAmps; qreal *d_imAmps; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; int numTargAmps = uNumRows; hipMallocManaged(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds); hipMallocManaged(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps); hipMallocManaged(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps); // call kernel hipLaunchKernelGGL(( statevec_multiControlledMultiQubitUnitaryKernel), dim3(CUDABlocks),dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps); // free kernel memory free(uReFlat); free(uImFlat); hipFree(d_targs); hipFree(d_uRe); hipFree(d_uIm); hipFree(d_ampInds); hipFree(d_reAmps); hipFree(d_imAmps); } __global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){ // decide the 4 amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes if (thisTask>=numTasks) return; qreal *reVec = qureg.stateVec.real; qreal *imVec = qureg.stateVec.imag; // find indices of amplitudes to modify (treat q1 as the least significant bit) long long int ind00, ind01, ind10, ind11; ind00 = insertTwoZeroBits(thisTask, q1, q2); // modify only if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; ind01 = flipBit(ind00, q1); ind10 = flipBit(ind00, q2); ind11 = flipBit(ind01, q2); // extract statevec amplitudes qreal re00, re01, re10, re11; qreal im00, im01, im10, im11; re00 = reVec[ind00]; im00 = imVec[ind00]; re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; re11 = reVec[ind11]; im11 = imVec[ind11]; // apply u * {amp00, amp01, amp10, amp11} reVec[ind00] = u.r0c0.real*re00 - u.r0c0.imag*im00 + u.r0c1.real*re01 - u.r0c1.imag*im01 + u.r0c2.real*re10 - u.r0c2.imag*im10 + u.r0c3.real*re11 - u.r0c3.imag*im11; imVec[ind00] = u.r0c0.imag*re00 + u.r0c0.real*im00 + u.r0c1.imag*re01 + u.r0c1.real*im01 + u.r0c2.imag*re10 + u.r0c2.real*im10 + u.r0c3.imag*re11 + u.r0c3.real*im11; reVec[ind01] = u.r1c0.real*re00 - u.r1c0.imag*im00 + u.r1c1.real*re01 - u.r1c1.imag*im01 + u.r1c2.real*re10 - u.r1c2.imag*im10 + u.r1c3.real*re11 - 
u.r1c3.imag*im11; imVec[ind01] = u.r1c0.imag*re00 + u.r1c0.real*im00 + u.r1c1.imag*re01 + u.r1c1.real*im01 + u.r1c2.imag*re10 + u.r1c2.real*im10 + u.r1c3.imag*re11 + u.r1c3.real*im11; reVec[ind10] = u.r2c0.real*re00 - u.r2c0.imag*im00 + u.r2c1.real*re01 - u.r2c1.imag*im01 + u.r2c2.real*re10 - u.r2c2.imag*im10 + u.r2c3.real*re11 - u.r2c3.imag*im11; imVec[ind10] = u.r2c0.imag*re00 + u.r2c0.real*im00 + u.r2c1.imag*re01 + u.r2c1.real*im01 + u.r2c2.imag*re10 + u.r2c2.real*im10 + u.r2c3.imag*re11 + u.r2c3.real*im11; reVec[ind11] = u.r3c0.real*re00 - u.r3c0.imag*im00 + u.r3c1.real*re01 - u.r3c1.imag*im01 + u.r3c2.real*re10 - u.r3c2.imag*im10 + u.r3c3.real*re11 - u.r3c3.imag*im11; imVec[ind11] = u.r3c0.imag*re00 + u.r3c0.real*im00 + u.r3c1.imag*re01 + u.r3c1.real*im01 + u.r3c2.imag*re10 + u.r3c2.real*im10 + u.r3c3.imag*re11 + u.r3c3.real*im11; } void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u) { int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); // one kernel eval for every 4 amplitudes hipLaunchKernelGGL(( statevec_multiControlledTwoQubitUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlMask, q1, q2, argifyMatrix4(u)); } __global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledUnitaryKernel( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ArgMatrix2 u ){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; if (ctrlQubitsMask == (ctrlQubitsMask & (indexUp ^ ctrlFlipMask))) { // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_multiControlledUnitary( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ComplexMatrix2 u ){ int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledUnitaryKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u)); } __global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } void statevec_pauliX(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliXKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit); } __global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){ long long int sizeHalfBlock = 1LL << targetQubit; long long int sizeBlock = 2LL * sizeHalfBlock; long long int numTasks = qureg.numAmpsPerChunk >> 1; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int thisBlock = thisTask / sizeHalfBlock; long long int indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; long long int indexLo = indexUp + sizeHalfBlock; qreal stateRealUp, stateImagUp; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; stateVecImag[indexLo] = conjFac * stateRealUp; } void statevec_pauliY(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, 1); } void statevec_pauliYConj(Qureg qureg, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_pauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, -1); } __global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac) { long long int index; long long int sizeBlock, sizeHalfBlock; long long int stateVecSize; int controlBit; qreal stateRealUp, stateImagUp; long long int thisBlock, indexUp, indexLo; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; // update under +-{{0, -i}, {i, 0}} stateVecReal[indexUp] = conjFac * stateVecImag[indexLo]; stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo]; stateVecReal[indexLo] = conjFac * -stateImagUp; 
stateVecImag[indexLo] = conjFac * stateRealUp; } } void statevec_controlledPauliY(Qureg qureg, int controlQubit, int targetQubit) { int conjFactor = 1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor); } void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit) { int conjFactor = -1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPauliYKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit, conjFactor); } __global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) { long long int sizeBlock, sizeHalfBlock; long long int thisBlock, indexUp,indexLo; qreal stateRealLo, stateImagLo; long long int thisTask; long long int numTasks = qureg.numAmpsPerChunk >> 1; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo; } void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term) { qreal cosAngle = term.real; qreal sinAngle = term.imag; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_phaseShiftByTermKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit, cosAngle, sinAngle); } __global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle) { long long int index; long long int stateVecSize; int bit1, bit2; qreal stateRealLo, stateImagLo; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2, cosAngle, sinAngle); } __global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { qreal stateRealLo, stateImagLo; long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; 
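// the phase rotation below is applied only to amplitudes whose index has every control bit of `mask` set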
qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); long long int mask = getQubitBitMask(controlQubits, numControlQubits); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledPhaseShiftKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle); } __global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { long long int stateVecSize = qureg.numAmpsPerChunk; long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; int fac = getBitMaskParity(mask & index)? -1 : 1; qreal stateReal = stateVecReal[index]; qreal stateImag = stateVecImag[index]; stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag; stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag; } void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle) { qreal cosAngle = cos(angle/2.0); qreal sinAngle = sin(angle/2.0); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiRotateZKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask, cosAngle, sinAngle); } qreal densmatr_calcTotalProb(Qureg qureg) { // computes the trace using Kahan summation qreal pTotal=0; qreal y, t, c; c = 0; long long int numCols = 1LL << qureg.numQubitsRepresented; long long diagIndex; //copyStateFromGPU(qureg); for (int col=0; col< numCols; col++) { diagIndex = col*(numCols + 1); y = qureg.stateVec.real[diagIndex] - c; t = pTotal + y; c = ( t - pTotal ) - y; // brackets are important pTotal = t; } return pTotal; } qreal statevec_calcTotalProb(Qureg qureg){ /* IJB - implemented using Kahan summation for greater accuracy at a slight floating point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */ /* Don't change the bracketing in this routine! 
*/ qreal pTotal=0; qreal y, t, c; long long int index; long long int numAmpsPerRank = qureg.numAmpsPerChunk; //copyStateFromGPU(qureg); hipDeviceSynchronize(); c = 0.0; for (index=0; index<numAmpsPerRank; index++){ /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */ // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */ //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; } return pTotal; } __global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2) { long long int index; long long int stateVecSize; int bit1, bit2; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, idQubit1, idQubit2); } __global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask) { long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits) { int threadsPerCUDABlock, CUDABlocks; long long int mask = getQubitBitMask(controlQubits, numControlQubits); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_multiControlledPhaseFlipKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, mask); } __global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) { qreal *reVec = qureg.stateVec.real; qreal *imVec = qureg.stateVec.imag; long long int numTasks = qureg.numAmpsPerChunk >> 2; // each iteration updates 2 amps and skips 2 amps long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int ind00, ind01, ind10; qreal re01, re10, im01, im10; // determine ind00 of |..0..0..>, |..0..1..> and |..1..0..> ind00 = insertTwoZeroBits(thisTask, qb1, qb2); ind01 = flipBit(ind00, qb1); ind10 = flipBit(ind00, qb2); // extract statevec amplitudes re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; // swap 01 and 10 amps reVec[ind01] = re10; reVec[ind10] = re01; imVec[ind01] = im10; imVec[ind10] = im01; } void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = 
ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_swapQubitAmpsKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, qb1, qb2); } __global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; qreal recRoot2 = 1.0/sqrt(2.0); thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo); stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo); stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo); stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo); } void statevec_hadamard(Qureg qureg, int targetQubit) { printf("statevec_hadamard\n"); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipDeviceSynchronize(); hipLaunchKernelGGL(( statevec_hadamardKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, targetQubit); } __global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit) { long long int index; long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved long long int stateVecSize; int controlBit; // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } } void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit) { 
int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_controlledNotKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, controlQubit, targetQubit); } __device__ __host__ unsigned int log2Int( unsigned int x ) { unsigned int ans = 0 ; while( x>>=1 ) ans++; return ans ; } __device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){ int i, l, r; int threadMax, maxDepth; threadMax = length/2; maxDepth = log2Int(length/2); for (i=0; i<maxDepth+1; i++){ if (threadIdx.x<threadMax){ l = threadIdx.x; r = l + threadMax; arrayIn[l] = arrayIn[r] + arrayIn[l]; } threadMax = threadMax >> 1; __syncthreads(); // optimise -- use warp shuffle instead } if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0]; } __global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){ extern __shared__ qreal tempReductionArray[]; int blockOffset = blockIdx.x*length; tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2]; tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1]; __syncthreads(); reduceBlock(tempReductionArray, reducedArray, length); } __global__ void densmatr_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // run by each thread // use of block here refers to contiguous amplitudes where measureQubit = 0, // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numTasks = densityDim >> 1; long long int sizeHalfBlock = 1LL << (measureQubit); long long int sizeBlock = 2LL * sizeHalfBlock; long long int thisBlock; // which block this thread is processing long long int thisTask; // which part of the block this thread is processing long long int basisIndex; // index of this thread's computational basis state long long int densityIndex; // " " index of |basis><basis| in the flat density matrix // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; // figure out which density matrix prob that this thread is assigned thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; densityIndex = (densityDim + 1) * basisIndex; // record the probability in the CUDA-BLOCK-wide array qreal prob = qureg.stateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 tempReductionArray[threadIdx.x] = prob; // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } __global__ void statevec_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; // (good for shared memory parallelism) extern __shared__ qreal tempReductionArray[]; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << 
(measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; qreal realVal, imagVal; realVal = stateVecReal[index]; imagVal = stateVecImag[index]; tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){ int levels=0; while (numValuesToReduce){ numValuesToReduce = numValuesToReduce/numReducedPerLevel; levels++; } return levels; } void swapDouble(qreal **a, qreal **b){ qreal *temp; temp = *a; *a = *b; *b = temp; } qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit) { long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0 int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { hipLaunchKernelGGL(( densmatr_findProbabilityOfZeroKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, measureQubit, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal zeroProb; hipMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return zeroProb; } qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit) { long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; qreal stateProb=0; int firstTime=1; int maxReducedPerLevel = REDUCE_SHARED_SIZE; while(numValuesToReduce>1){ if (numValuesToReduce<maxReducedPerLevel){ // Need less than one CUDA block to reduce values valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { // Use full CUDA blocks, with block size constrained by shared mem usage valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime){ hipLaunchKernelGGL(( statevec_findProbabilityOfZeroKernel), dim3(numCUDABlocks), 
dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, measureQubit, qureg.firstLevelReduction); firstTime=0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return stateProb; } qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } /** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number */ __global__ void densmatr_calcInnerProductKernel( Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm qreal prod = ( a.stateVec.real[index]*b.stateVec.real[index] + a.stateVec.imag[index]*b.stateVec.imag[index]); // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = prod; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcInnerProduct(Qureg a, Qureg b) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = a.numAmpsTotal; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the terms in each block // arbitrarily store the reduction in the b qureg's array if (firstTime) { hipLaunchKernelGGL(( densmatr_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, a, b, a.numAmpsTotal, b.firstLevelReduction); firstTime = 0; } // sum the block terms else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, b.firstLevelReduction, b.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal innerprod; hipMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return innerprod; } /** computes either a real or imag term in the inner product */ __global__ void statevec_calcInnerProductKernel( int getRealComp, qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* 
vecImag2, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // choose whether to calculate the real or imaginary term of the inner product qreal innerProdTerm; if (getRealComp) innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; else innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = innerProdTerm; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the * inner product, so as to not have to worry about keeping the sums separated during reduction. * Truly disgusting, probably doubles runtime, please fix. * @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc? */ Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) { qreal innerProdReal, innerProdImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, bra.stateVec.real, bra.stateVec.imag, ket.stateVec.real, ket.stateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcInnerProductKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, bra.stateVec.real, bra.stateVec.imag, ket.stateVec.real, ket.stateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); 
swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex innerProd; innerProd.real = innerProdReal; innerProd.imag = innerProdImag; return innerProd; } /** computes one term of (vec^*T) dens * vec */ __global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { // figure out which density matrix row to consider long long int col; long long int row = blockIdx.x*blockDim.x + threadIdx.x; if (row >= dim) return; qreal* densReal = dens.stateVec.real; qreal* densImag = dens.stateVec.imag; qreal* vecReal = vec.stateVec.real; qreal* vecImag = vec.stateVec.imag; // compute the row-th element of the product dens*vec qreal prodReal = 0; qreal prodImag = 0; for (col=0LL; col < dim; col++) { qreal densElemReal = densReal[dim*col + row]; qreal densElemImag = densImag[dim*col + row]; prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; } // multiply with row-th elem of (vec^*) qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = termReal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) { // we're summing the square of every term in the density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block // store the reduction in the pureState array if (firstTime) { hipLaunchKernelGGL(( densmatr_calcFidelityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg, pureState, densityDim, pureState.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, pureState.firstLevelReduction, pureState.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal fidelity; hipMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return fidelity; } __global__ void densmatr_calcHilbertSchmidtDistanceSquaredKernel( qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm, long long int numAmpsToSum, qreal *reducedArray ) { // figure out which density matrix term this thread is assigned long long int index = 
blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; // compute this thread's sum term qreal difRe = aRe[index] - bRe[index]; qreal difIm = aIm[index] - bIm[index]; qreal term = difRe*difRe + difIm*difIm; // array of each thread's collected term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /* computes sqrt(Tr( (a-b) conjTrans(a-b) ) = sqrt( sum of abs vals of (a-b)) */ qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) { // we're summing the square of every term in (a-b) long long int numValuesToReduce = a.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block (store reduction temp values in a's reduction array) if (firstTime) { hipLaunchKernelGGL(( densmatr_calcHilbertSchmidtDistanceSquaredKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, a.stateVec.real, a.stateVec.imag, b.stateVec.real, b.stateVec.imag, numValuesToReduce, a.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, a.firstLevelReduction, a.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal trace; hipMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); qreal sqrtTrace = sqrt(trace); return sqrtTrace; } __global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Computes the trace of the density matrix squared */ qreal densmatr_calcPurity(Qureg qureg) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = qureg.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = 
ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { hipLaunchKernelGGL(( densmatr_calcPurityKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, qureg.stateVec.real, qureg.stateVec.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal traceDensSquared; hipMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); return traceDensSquared; } __global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- measured probability qreal renorm; // probability (returned) value // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity // (good for shared memory parallelism) long long int numTasks=qureg.numAmpsPerChunk>>1; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // renorm=1/sqrt(totalProbability); qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; if (outcome==0){ stateVecReal[index]=stateVecReal[index]*renorm; stateVecImag[index]=stateVecImag[index]*renorm; stateVecReal[index+sizeHalfBlock]=0; stateVecImag[index+sizeHalfBlock]=0; } else if (outcome==1){ stateVecReal[index]=0; stateVecImag[index]=0; stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; } } /* * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or * else the state-vector will lose normalisation */ void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, measureQubit, outcome, outcomeProb); } /** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ __global__ void 
densmatr_collapseToKnownProbOutcomeKernel( qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, long long int part1, long long int part2, long long int part3, long long int rowBit, long long int colBit, long long int desired, long long int undesired) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numBasesToVisit) return; long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); // renormalise desired outcome vecReal[base + desired] /= outcomeProb; vecImag[base + desired] /= outcomeProb; // kill undesired outcome vecReal[base + undesired] = 0; vecImag[base + undesired] = 0; // kill |..0..><..1..| states vecReal[base + colBit] = 0; vecImag[base + colBit] = 0; vecReal[base + rowBit] = 0; vecImag[base + rowBit] = 0; } /** This involves finding |...i...><...j...| states and killing those where i!=j */ void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int rowQubit = measureQubit + qureg.numQubitsRepresented; int colBit = 1LL << measureQubit; int rowBit = 1LL << rowQubit; long long int numBasesToVisit = qureg.numAmpsPerChunk/4; long long int part1 = colBit -1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numBasesToVisit - (rowBit >> 1); long long int desired, undesired; if (outcome == 0) { desired = 0; undesired = colBit | rowBit; } else { desired = colBit | rowBit; undesired = 0; } int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_collapseToKnownProbOutcomeKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, outcomeProb, qureg.stateVec.real, qureg.stateVec.imag, numBasesToVisit, part1, part2, part3, rowBit, colBit, desired, undesired); } __global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= numAmpsToVisit) return; combineQureg.stateVec.real[ampInd] *= 1-otherProb; combineQureg.stateVec.imag[ampInd] *= 1-otherProb; combineQureg.stateVec.real[ampInd] += otherProb*otherQureg.stateVec.real[ampInd]; combineQureg.stateVec.imag[ampInd] += otherProb*otherQureg.stateVec.imag[ampInd]; } void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDensityMatrixKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, combineQureg, otherProb, otherQureg, numAmpsToVisit ); } /** Called once for every 4 amplitudes in density matrix * Works by establishing the |..0..><..0..| state (for its given index) then * visiting |..1..><..0..| and |..0..><..1..|. 
Labels |part1 X pa><rt2 NOT(X) part3| * From the brain of Simon Benjamin */ __global__ void densmatr_mixDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int colBit, long long int rowBit) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); vecReal[ampInd + colBit] *= fac; vecImag[ampInd + colBit] *= fac; vecReal[ampInd + rowBit] *= fac; vecImag[ampInd + rowBit] *= fac; } void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) { long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDephasingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, dephFac, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, colBit, rowBit); } void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) { if (dephase == 0) return; qreal dephFac = 1 - dephase; densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac); } /** Called 12 times for every 16 amplitudes in density matrix * Each sums from the |..0..0..><..0..0..| index to visit either * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|. * From the brain of Simon Benjamin */ __global__ void densmatr_mixTwoQubitDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2) { long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x; if (outerInd >= numAmpsToVisit) return; // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| int meta = 1 + (outerInd/numBackgroundStates); if (meta > 4) meta++; if (meta > 9) meta++; long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); long long int scanInd = outerInd % numBackgroundStates; long long int stateInd = ( shift + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); vecReal[stateInd] *= fac; vecImag[stateInd] *= fac; } // @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? 
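/* Illustrative worked example (added commentary, not from the original source):
 * the part1..part5 masks computed in densmatr_mixTwoQubitDephasing below expand a
 * compressed index (ranging over the 1/16th of flat density-matrix amplitudes whose
 * two target column bits and two target row bits are all zero) into a full
 * flat-matrix index. For instance, with numQubitsRepresented = 3, qubit1 = 0 and
 * qubit2 = 2 (so the flat matrix has 2^6 = 64 amplitudes):
 *     colBit1 = 1, colBit2 = 4, rowBit1 = 8, rowBit2 = 32
 *     part1 = 0, part2 = 1, part3 = 0, part4 = 2, part5 = 0
 * so scanInd = 3 (binary 11) maps to
 *     (3&0) + ((3&1)<<1) + ((3&0)<<2) + ((3&2)<<3) + ((3&0)<<4) = 2 + 16 = 18 = binary 010010,
 * which indeed has zeros at bit positions 0, 2, 3 and 5 (the two column and two row bits).
 */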
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) { if (dephase == 0) return; // assumes qubit2 > qubit1 int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); qreal dephFac = 1 - dephase; // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed) long long int numBackgroundStates = qureg.numAmpsPerChunk/16; // 12 of these states experience dephasing long long int numAmpsToVisit = 12 * numBackgroundStates; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixTwoQubitDephasingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, dephFac, qureg.stateVec.real, qureg.stateVec.imag, numBackgroundStates, numAmpsToVisit, part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); vecReal[baseInd] *= 1 - depolLevel; vecImag[baseInd] *= 1 - depolLevel; vecReal[targetInd] *= 1 - depolLevel; vecImag[targetInd] *= 1 - depolLevel; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; vecReal[targetInd] += realAvDepol; vecImag[targetInd] += imagAvDepol; } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDampingKernel( qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = damping * ( vecReal[targetInd]); qreal imagAvDepol = damping * ( vecImag[targetInd]); vecReal[targetInd] *= 1 - damping; vecImag[targetInd] *= 1 - damping; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; } void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) { if (depolLevel == 0) return; densmatr_mixDephasing(qureg, targetQubit, depolLevel); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long 
long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDepolarisingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, depolLevel, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) { if (damping == 0) return; qreal dephase = sqrt(1-damping); densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixDampingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, damping, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } /** Called once for every 16 amplitudes */ __global__ void densmatr_mixTwoQubitDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int rowCol1, long long int rowCol2) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; // index of |..0..0..><..0..0| long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); long long int ind01 = ind00 + rowCol1; long long int ind10 = ind00 + rowCol2; long long int ind11 = ind00 + rowCol1 + rowCol2; qreal realAvDepol = depolLevel * 0.25 * ( vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); qreal imagAvDepol = depolLevel * 0.25 * ( vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); qreal retain = 1 - depolLevel; vecReal[ind00] *= retain; vecImag[ind00] *= retain; vecReal[ind01] *= retain; vecImag[ind01] *= retain; vecReal[ind10] *= retain; vecImag[ind10] *= retain; vecReal[ind11] *= retain; vecImag[ind11] *= retain; vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; } void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { if (depolLevel == 0) return; // assumes qubit2 > qubit1 densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel); int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int rowCol1 = colBit1 | rowBit1; long long int rowCol2 = colBit2 | rowBit2; long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = 
(rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = numAmpsToVisit - (rowBit2 >> 3); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_mixTwoQubitDepolarisingKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, depolLevel, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, part4, part5, rowCol1, rowCol2); } __global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; long long int numAmpsToVisit = qureg1.numAmpsPerChunk; if (ampInd >= numAmpsToVisit) return; qreal *vecRe1 = qureg1.stateVec.real; qreal *vecIm1 = qureg1.stateVec.imag; qreal *vecRe2 = qureg2.stateVec.real; qreal *vecIm2 = qureg2.stateVec.imag; qreal *vecReOut = out.stateVec.real; qreal *vecImOut = out.stateVec.imag; qreal facRe1 = fac1.real; qreal facIm1 = fac1.imag; qreal facRe2 = fac2.real; qreal facIm2 = fac2.imag; qreal facReOut = facOut.real; qreal facImOut = facOut.imag; qreal re1,im1, re2,im2, reOut,imOut; long long int index = ampInd; re1 = vecRe1[index]; im1 = vecIm1[index]; re2 = vecRe2[index]; im2 = vecIm2[index]; reOut = vecReOut[index]; imOut = vecImOut[index]; vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2); vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2); } void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int numAmpsToVisit = qureg1.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_setWeightedQuregKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, fac1, qureg1, fac2, qureg2, facOut, out ); } __global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.stateVec.real; qreal* stateIm = qureg.stateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask]; qreal d = opIm[thisTask]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( statevec_applyDiagonalOpKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, op); } __global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.stateVec.real; qreal* stateIm = qureg.stateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; int opDim = (1 << op.numQubits); qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask % 
opDim]; qreal d = opIm[thisTask % opDim]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void densmatr_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); hipLaunchKernelGGL(( densmatr_applyDiagonalOpKernel), dim3(CUDABlocks), dim3(threadsPerCUDABlock), 0, 0, qureg, op); } /** computes either a real or imag term of |vec_i|^2 op_i */ __global__ void statevec_calcExpecDiagonalOpKernel( int getRealComp, qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // choose whether to calculate the real or imaginary term of the expec term qreal expecVal; if (getRealComp) expecVal = vecAbs * opReal[index]; else expecVal = vecAbs * opImag[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = expecVal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) */ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( statevec_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), 
dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } __global__ void densmatr_calcExpecDiagonalOpKernel( int getRealComp, qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag, int numQubits, long long int numTermsToSum, qreal* reducedArray) { /** if the thread represents a diagonal op, then it computes either a * real or imag term of matr_{ii} op_i. Otherwise, it writes a 0 to the * reduction array */ // index will identy one of the 2^Q diagonals to be summed long long int matInd = blockIdx.x*blockDim.x + threadIdx.x; if (matInd >= numTermsToSum) return; long long int diagSpacing = (1LL << numQubits) + 1LL; int isDiag = ((matInd % diagSpacing) == 0); long long int opInd = matInd / diagSpacing; qreal val = 0; if (isDiag) { qreal matRe = matReal[matInd]; qreal matIm = matImag[matInd]; qreal opRe = opReal[opInd]; qreal opIm = opImag[opInd]; // (matRe + matIm i)(opRe + opIm i) = // (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe) if (getRealComp) val = matRe * opRe - matIm * opIm; else val = matRe * opIm + matIm * opRe; } // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = val; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) 
*/ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( densmatr_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { hipLaunchKernelGGL(( densmatr_calcExpecDiagonalOpKernel), dim3(numCUDABlocks), dim3(valuesPerCUDABlock), sharedMemSize, 0, getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { hipDeviceSynchronize(); hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0, qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); hipDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } hipMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), hipMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) { // update both RAM and VRAM, for consistency memcpy(&op.real[startInd], real, numElems * sizeof(qreal)); memcpy(&op.imag[startInd], imag, numElems * sizeof(qreal)); hipDeviceSynchronize(); hipMemcpy( op.deviceOperator.real + startInd, real, numElems * sizeof(*(op.deviceOperator.real)), hipMemcpyHostToDevice); hipMemcpy( op.deviceOperator.imag + startInd, imag, numElems * sizeof(*(op.deviceOperator.imag)), hipMemcpyHostToDevice); } void seedQuESTDefault(){ // init MT random number generator with three keys -- time and pid // for the MPI version, it is ok that all procs will get the same seed as random numbers will only be // used by the 
// master process
    unsigned long int key[2];
    getQuESTDefaultSeedKey(key);
    init_by_array(key, 2);
}

#ifdef __cplusplus
}
#endif
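/* A minimal sketch (not part of QuEST's API) of the helper suggested by the @TODO
 * notes above: every *_calc* routine in this file repeats the same second-stage
 * reduction loop over firstLevelReduction/secondLevelReduction. The hypothetical
 * finishReduction() below factors out that boilerplate. It assumes a problem-specific
 * kernel has already written numPartialSums partial sums into *firstLevel, and it
 * reuses copySharedReduceBlock, swapDouble and REDUCE_SHARED_SIZE exactly as the
 * existing loops do.
 */
qreal finishReduction(qreal **firstLevel, qreal **secondLevel, long long int numPartialSums) {
    int maxReducedPerLevel = REDUCE_SHARED_SIZE;
    while (numPartialSums > 1) {
        int valuesPerCUDABlock, numCUDABlocks;
        if (numPartialSums < maxReducedPerLevel) {
            // the remaining partial sums fit within a single CUDA block
            valuesPerCUDABlock = numPartialSums;
            numCUDABlocks = 1;
        } else {
            // use full CUDA blocks, constrained by shared-memory size
            valuesPerCUDABlock = maxReducedPerLevel;
            numCUDABlocks = ceil((qreal) numPartialSums / valuesPerCUDABlock);
        }
        int sharedMemSize = valuesPerCUDABlock * sizeof(qreal);

        // reduce the current level of partial sums into the other buffer, then swap
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( copySharedReduceBlock), dim3(numCUDABlocks), dim3(valuesPerCUDABlock/2), sharedMemSize, 0,
                *firstLevel, *secondLevel, valuesPerCUDABlock);
        hipDeviceSynchronize();
        swapDouble(firstLevel, secondLevel);

        numPartialSums = numPartialSums / maxReducedPerLevel;
    }
    // the fully-reduced scalar now sits at the front of *firstLevel
    qreal result;
    hipMemcpy(&result, *firstLevel, sizeof(qreal), hipMemcpyDeviceToHost);
    return result;
}
/* e.g. statevec_findProbabilityOfZero could then launch its kernel once (with
 * numCUDABlocks blocks writing one partial sum each) and simply return
 * finishReduction(&qureg.firstLevelReduction, &qureg.secondLevelReduction, numCUDABlocks). */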
e21ad0d17e764484db6d97b2cfe7e6b2b32f02c4.cu
// Distributed under MIT licence. See https://github.com/QuEST-Kit/QuEST/blob/master/LICENCE.txt for details /** @file * An implementation of the backend in ../QuEST_internal.h for a GPU environment. * * @author Ania Brown * @author Tyson Jones */ # include "QuEST.h" # include "QuEST_precision.h" # include "QuEST_internal.h" // purely to resolve getQuESTDefaultSeedKey # include "mt19937ar.h" # include <stdlib.h> # include <stdio.h> # include <math.h> # define REDUCE_SHARED_SIZE 512 # define DEBUG 1 /* * struct types for concisely passing unitaries to kernels */ // hide these from doxygen /// \cond HIDDEN_SYMBOLS typedef struct ArgMatrix2 { Complex r0c0, r0c1; Complex r1c0, r1c1; } ArgMatrix2; typedef struct ArgMatrix4 { Complex r0c0, r0c1, r0c2, r0c3; Complex r1c0, r1c1, r1c2, r1c3; Complex r2c0, r2c1, r2c2, r2c3; Complex r3c0, r3c1, r3c2, r3c3; } ArgMatrix4; ArgMatrix2 argifyMatrix2(ComplexMatrix2 m) { ArgMatrix2 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; return a; } ArgMatrix4 argifyMatrix4(ComplexMatrix4 m) { ArgMatrix4 a; a.r0c0.real=m.real[0][0]; a.r0c0.imag=m.imag[0][0]; a.r0c1.real=m.real[0][1]; a.r0c1.imag=m.imag[0][1]; a.r0c2.real=m.real[0][2]; a.r0c2.imag=m.imag[0][2]; a.r0c3.real=m.real[0][3]; a.r0c3.imag=m.imag[0][3]; a.r1c0.real=m.real[1][0]; a.r1c0.imag=m.imag[1][0]; a.r1c1.real=m.real[1][1]; a.r1c1.imag=m.imag[1][1]; a.r1c2.real=m.real[1][2]; a.r1c2.imag=m.imag[1][2]; a.r1c3.real=m.real[1][3]; a.r1c3.imag=m.imag[1][3]; a.r2c0.real=m.real[2][0]; a.r2c0.imag=m.imag[2][0]; a.r2c1.real=m.real[2][1]; a.r2c1.imag=m.imag[2][1]; a.r2c2.real=m.real[2][2]; a.r2c2.imag=m.imag[2][2]; a.r2c3.real=m.real[2][3]; a.r2c3.imag=m.imag[2][3]; a.r3c0.real=m.real[3][0]; a.r3c0.imag=m.imag[3][0]; a.r3c1.real=m.real[3][1]; a.r3c1.imag=m.imag[3][1]; a.r3c2.real=m.real[3][2]; a.r3c2.imag=m.imag[3][2]; a.r3c3.real=m.real[3][3]; a.r3c3.imag=m.imag[3][3]; return a; } /// \endcond /* * in-kernel bit twiddling functions */ __forceinline__ __device__ int extractBit (const int locationOfBitFromRight, const long long int theEncodedNumber) { return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; } __forceinline__ __device__ int getBitMaskParity(long long int mask) { int parity = 0; while (mask) { parity = !parity; mask = mask & (mask-1); } return parity; } __forceinline__ __device__ long long int flipBit(const long long int number, const int bitInd) { return (number ^ (1LL << bitInd)); } __forceinline__ __device__ long long int insertZeroBit(const long long int number, const int index) { long long int left, right; left = (number >> index) << index; right = number - left; return (left << 1) ^ right; } __forceinline__ __device__ long long int insertTwoZeroBits(const long long int number, const int bit1, const int bit2) { int small = (bit1 < bit2)? bit1 : bit2; int big = (bit1 < bit2)? bit2 : bit1; return insertZeroBit(insertZeroBit(number, small), big); } __forceinline__ __device__ long long int insertZeroBits(long long int number, int* inds, const int numInds) { /* inserted bit inds must strictly increase, so that their final indices are correct. * in-lieu of sorting (avoided since no C++ variable-size arrays, and since we're already * memory bottle-necked so overhead eats this slowdown), we find the next-smallest index each * at each insert. recall every element of inds (a positive or zero number) is unique. 
* This function won't appear in the CPU code, which can use C99 variable-size arrays and * ought to make a sorted array before threading */ int curMin = inds[0]; int prevMin = -1; for (int n=0; n < numInds; n++) { // find next min for (int t=0; t < numInds; t++) if (inds[t]>prevMin && inds[t]<curMin) curMin = inds[t]; number = insertZeroBit(number, curMin); // set curMin to an arbitrary non-visited elem prevMin = curMin; for (int t=0; t < numInds; t++) if (inds[t] > curMin) { curMin = inds[t]; break; } } return number; } /* * state vector and density matrix operations */ #ifdef __cplusplus extern "C" { #endif void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { cudaDeviceSynchronize(); cudaMemcpy( qureg.stateVec.real + startInd, reals, numAmps * sizeof(*(qureg.stateVec.real)), cudaMemcpyHostToDevice); cudaMemcpy( qureg.stateVec.imag + startInd, imags, numAmps * sizeof(*(qureg.stateVec.imag)), cudaMemcpyHostToDevice); } /** works for both statevectors and density matrices */ void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { // copy copyQureg's GPU statevec to targetQureg's GPU statevec cudaDeviceSynchronize(); cudaMemcpy( targetQureg.stateVec.real, copyQureg.stateVec.real, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.stateVec.real)), cudaMemcpyDeviceToDevice); cudaMemcpy( targetQureg.stateVec.imag, copyQureg.stateVec.imag, targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.stateVec.imag)), cudaMemcpyDeviceToDevice); } __global__ void densmatr_initPureStateKernel( long long int numPureAmps, qreal *targetVecReal, qreal *targetVecImag, qreal *copyVecReal, qreal *copyVecImag) { // this is a particular index of the pure copyQureg long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=numPureAmps) return; qreal realRow = copyVecReal[index]; qreal imagRow = copyVecImag[index]; for (long long int col=0; col < numPureAmps; col++) { qreal realCol = copyVecReal[col]; qreal imagCol = - copyVecImag[col]; // minus for conjugation targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; } } void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_initPureStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( copyQureg.numAmpsPerChunk, targetQureg.stateVec.real, targetQureg.stateVec.imag, copyQureg.stateVec.real, copyQureg.stateVec.imag); } __global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = probFactor; stateVecImag[index] = 0.0; } void densmatr_initPlusState(Qureg qureg) { qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, probFactor, qureg.stateVec.real, qureg.stateVec.imag); } __global__ void densmatr_initClassicalStateKernel( long long int densityNumElems, qreal *densityReal, qreal *densityImag, long long int densityInd) { // initialise the state to all zeros long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= 
densityNumElems) return; densityReal[index] = 0.0; densityImag[index] = 0.0; if (index==densityInd){ // classical state has probability 1 densityReal[densityInd] = 1.0; densityImag[densityInd] = 0.0; } } void densmatr_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); // index of the desired state in the flat density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int densityInd = (densityDim + 1)*stateInd; // identical to pure version densmatr_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag, densityInd); } void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) { printf("statevec_createQureg\n"); // allocate CPU memory long long int numAmps = 1L << numQubits; long long int numAmpsPerRank = numAmps/env.numRanks; //qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); //qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); if (env.numRanks>1){ qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); } // check cpu memory allocation was successful //if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag)) // && numAmpsPerRank ) { // printf("Could not allocate memory!\n"); // exit (EXIT_FAILURE); //} if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag)) && numAmpsPerRank ) { printf("Could not allocate memory!\n"); exit (EXIT_FAILURE); } qureg->numQubitsInStateVec = numQubits; qureg->numAmpsPerChunk = numAmpsPerRank; qureg->numAmpsTotal = numAmps; qureg->chunkId = env.rank; qureg->numChunks = env.numRanks; qureg->isDensityMatrix = 0; // allocate GPU memory cudaMallocManaged(&(qureg->stateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.real))); cudaMallocManaged(&(qureg->stateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.imag))); cudaMallocManaged(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); cudaMallocManaged(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* sizeof(qreal)); //qureg->stateVec.real = (qreal*)malloc(qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.real))); //qureg->stateVec.imag = (qreal*)malloc(qureg->numAmpsPerChunk*sizeof(*(qureg->stateVec.imag))); // check gpu memory allocation was successful if (!(qureg->stateVec.real) || !(qureg->stateVec.imag)){ printf("Could not allocate memory on GPU!\n"); exit (EXIT_FAILURE); } } void statevec_destroyQureg(Qureg qureg, QuESTEnv env) { // Free CPU memory //free(qureg.stateVec.real); //free(qureg.stateVec.imag); if (env.numRanks>1){ free(qureg.pairStateVec.real); free(qureg.pairStateVec.imag); } // Free GPU memory cudaFree(qureg.stateVec.real); cudaFree(qureg.stateVec.imag); cudaFree(qureg.firstLevelReduction); cudaFree(qureg.secondLevelReduction); //free(qureg.stateVec.real); //free(qureg.stateVec.imag); } DiagonalOp agnostic_createDiagonalOp(int numQubits, QuESTEnv env) { DiagonalOp op; op.numQubits = numQubits; op.numElemsPerChunk = (1LL << numQubits) / env.numRanks; op.chunkId = env.rank; op.numChunks = env.numRanks; // allocate CPU memory (initialised to zero) op.real = (qreal*) calloc(op.numElemsPerChunk, 
sizeof(qreal)); op.imag = (qreal*) calloc(op.numElemsPerChunk, sizeof(qreal)); // @TODO no handling of rank>1 allocation (no distributed GPU) // check cpu memory allocation was successful if ( !op.real || !op.imag ) { printf("Could not allocate memory!\n"); exit(EXIT_FAILURE); } // allocate GPU memory size_t arrSize = op.numElemsPerChunk * sizeof(qreal); cudaMalloc(&(op.deviceOperator.real), arrSize); cudaMalloc(&(op.deviceOperator.imag), arrSize); // check gpu memory allocation was successful if (!op.deviceOperator.real || !op.deviceOperator.imag) { printf("Could not allocate memory on GPU!\n"); exit(EXIT_FAILURE); } // initialise GPU memory to zero cudaMemset(op.deviceOperator.real, 0, arrSize); cudaMemset(op.deviceOperator.imag, 0, arrSize); return op; } void agnostic_destroyDiagonalOp(DiagonalOp op) { free(op.real); free(op.imag); cudaFree(op.deviceOperator.real); cudaFree(op.deviceOperator.imag); } void agnostic_syncDiagonalOp(DiagonalOp op) { size_t arrSize = (1LL << op.numQubits) * sizeof(qreal); cudaDeviceSynchronize(); cudaMemcpy(op.deviceOperator.real, op.real, arrSize, cudaMemcpyHostToDevice); cudaMemcpy(op.deviceOperator.imag, op.imag, arrSize, cudaMemcpyHostToDevice); } int GPUExists(void){ int deviceCount, device; int gpuDeviceCount = 0; struct cudaDeviceProp properties; cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount); if (cudaResultCode != cudaSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { cudaGetDeviceProperties(&properties, device); if (properties.major != 9999) { /* 9999 means emulation only */ ++gpuDeviceCount; } } if (gpuDeviceCount) return 1; else return 0; } QuESTEnv createQuESTEnv(void) { if (!GPUExists()){ printf("Trying to run GPU code with no GPU available\n"); exit(EXIT_FAILURE); } QuESTEnv env; env.rank=0; env.numRanks=1; seedQuESTDefault(); return env; } void syncQuESTEnv(QuESTEnv env){ cudaDeviceSynchronize(); } int syncQuESTSuccess(int successCode){ return successCode; } void destroyQuESTEnv(QuESTEnv env){ // MPI finalize goes here in MPI version. 
Call this function anyway for consistency } void reportQuESTEnv(QuESTEnv env){ printf("EXECUTION ENVIRONMENT:\n"); printf("Running locally on one node with GPU\n"); printf("Number of ranks is %d\n", env.numRanks); # ifdef _OPENMP printf("OpenMP enabled\n"); printf("Number of threads available is %d\n", omp_get_max_threads()); # else printf("OpenMP disabled\n"); # endif } void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){ sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec); } void copyStateToGPU(Qureg qureg) { //if (DEBUG) printf("Copying data to GPU\n"); //cudaMemcpy(qureg.stateVec.real, qureg.stateVec.real, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.real)), cudaMemcpyHostToDevice); //cudaMemcpy(qureg.stateVec.imag, qureg.stateVec.imag, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.imag)), cudaMemcpyHostToDevice); //if (DEBUG) printf("Finished copying data to GPU\n"); } void copyStateFromGPU(Qureg qureg) { //cudaDeviceSynchronize(); //if (DEBUG) printf("Copying data from GPU\n"); //cudaMemcpy(qureg.stateVec.real, qureg.stateVec.real, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.real)), cudaMemcpyDeviceToHost); //cudaMemcpy(qureg.stateVec.imag, qureg.stateVec.imag, // qureg.numAmpsPerChunk*sizeof(*(qureg.stateVec.imag)), cudaMemcpyDeviceToHost); //if (DEBUG) printf("Finished copying data from GPU\n"); } /** Print the current state vector of probability amplitudes for a set of qubits to standard out. For debugging purposes. Each rank should print output serially. Only print output for systems <= 5 qubits */ void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ long long int index; int rank; //copyStateFromGPU(qureg); cudaDeviceSynchronize(); if (qureg.numQubitsInStateVec<=5){ for (rank=0; rank<qureg.numChunks; rank++){ if (qureg.chunkId==rank){ if (reportRank) { printf("Reporting state from rank %d [\n", qureg.chunkId); //printf("\trank, index, real, imag\n"); printf("real, imag\n"); } else if (rank==0) { printf("Reporting state [\n"); printf("real, imag\n"); } for(index=0; index<qureg.numAmpsPerChunk; index++){ printf(REAL_STRING_FORMAT ", " REAL_STRING_FORMAT "\n", qureg.stateVec.real[index], qureg.stateVec.imag[index]); } if (reportRank || rank==qureg.numChunks-1) printf("]\n"); } syncQuESTEnv(env); } } } qreal statevec_getRealAmp(Qureg qureg, long long int index){ //qreal el=0; //cudaMemcpy(&el, &(qureg.stateVec.real[index]), // sizeof(*(qureg.stateVec.real)), cudaMemcpyDeviceToHost); //return el; return qureg.stateVec.real[index]; } qreal statevec_getImagAmp(Qureg qureg, long long int index){ //qreal el=0; //cudaMemcpy(&el, &(qureg.stateVec.imag[index]), // sizeof(*(qureg.stateVec.imag)), cudaMemcpyDeviceToHost); //return el; return qureg.stateVec.imag[index]; } __global__ void statevec_initBlankStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; // initialise the statevector to be all-zeros index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; } void statevec_initBlankState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initBlankStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag); } //__global__ void statevec_initZeroStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ __global__ void 
statevec_initZeroStateKernel(Qureg qureg) { long long int index; long long int stateVecSize = qureg.numAmpsPerChunk; // initialise the state to |0000..0000> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qureg.stateVec.real[index] = 0.0; qureg.stateVec.imag[index] = 0.0; if (index==0){ // zero state |0000..0000> has probability 1 qureg.stateVec.real[0] = 1.0; qureg.stateVec.imag[0] = 0.0; } } void statevec_initZeroState(Qureg qureg) { printf("statevec_initZeroState\n"); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); //statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( // qureg.numAmpsPerChunk, // qureg.stateVec.real, // qureg.stateVec.imag); statevec_initZeroStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg); cudaDeviceSynchronize(); printf("after initZeroState real[0]: %g real[1]: %g\n", qureg.stateVec.real[0], qureg.stateVec.real[1]); } __global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize); stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } void statevec_initPlusState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initPlusStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag); } __global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ long long int index; // initialise the state to |stateInd> index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; if (index==stateInd){ // classical state has probability 1 stateVecReal[stateInd] = 1.0; stateVecImag[stateInd] = 0.0; } } void statevec_initClassicalState(Qureg qureg, long long int stateInd) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initClassicalStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag, stateInd); } __global__ void statevec_initDebugStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ long long int index; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; stateVecReal[index] = (index*2.0)/10.0; stateVecImag[index] = (index*2.0+1.0)/10.0; } void statevec_initDebugState(Qureg qureg) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_initDebugStateKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg.numAmpsPerChunk, qureg.stateVec.real, qureg.stateVec.imag); } __global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ long long int index; int bit; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); bit = extractBit(qubitId, index); if (bit==outcome) { stateVecReal[index] = normFactor; stateVecImag[index] = 0.0; } else { stateVecReal[index] = 0.0; stateVecImag[index] = 0.0; 
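        // the half of the state vector whose qubitId bit disagrees with the requested outcome
        // gets zero amplitude, leaving a normalised uniform superposition over the matching states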
} } void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int outcome) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); statevec_initStateOfSingleQubitKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg->numAmpsPerChunk, qureg->stateVec.real, qureg->stateVec.imag, qubitId, outcome); } // returns 1 if successful, else 0 int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ long long int chunkSize, stateVecSize; long long int indexInChunk, totalIndex; chunkSize = qureg->numAmpsPerChunk; stateVecSize = chunkSize*qureg->numChunks; qreal *stateVecReal = qureg->stateVec.real; qreal *stateVecImag = qureg->stateVec.imag; FILE *fp; char line[200]; fp = fopen(filename, "r"); if (fp == NULL) return 0; indexInChunk = 0; totalIndex = 0; while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndex<stateVecSize){ if (line[0]!='#'){ int chunkId = totalIndex/chunkSize; if (chunkId==qureg->chunkId){ # if QuEST_PREC==1 sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==2 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # elif QuEST_PREC==4 sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), &(stateVecImag[indexInChunk])); # endif indexInChunk += 1; } totalIndex += 1; } } fclose(fp); copyStateToGPU(*qureg); // indicate success return 1; } int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ qreal diff; int chunkSize = mq1.numAmpsPerChunk; //copyStateFromGPU(mq1); //copyStateFromGPU(mq2); for (int i=0; i<chunkSize; i++){ diff = mq1.stateVec.real[i] - mq2.stateVec.real[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; if (diff<0) diff *= -1; if (diff>precision) return 0; } return 1; } __global__ void statevec_compactUnitaryKernel (Qureg qureg, int rotQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << rotQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } void statevec_compactUnitary(Qureg qureg, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_compactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, alpha, beta); } __global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; qreal alphaImag=alpha.imag, alphaReal=alpha.real; qreal betaImag=beta.imag, betaReal=beta.real; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp - betaReal*stateRealLo - betaImag*stateImagLo; stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp - betaReal*stateImagLo + betaImag*stateRealLo; // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + alphaReal*stateRealLo + alphaImag*stateImagLo; stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + alphaReal*stateImagLo - alphaImag*stateRealLo; } } void statevec_controlledCompactUnitary(Qureg qureg, int controlQubit, int targetQubit, Complex alpha, Complex beta) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledCompactUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, alpha, beta); } __global__ void statevec_unitaryKernel(Qureg qureg, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } void statevec_unitary(Qureg qureg, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_unitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledMultiQubitUnitaryKernel( Qureg qureg, long long int ctrlMask, int* targs, int numTargs, qreal* uRe, qreal* uIm, long long int* ampInds, qreal* reAmps, qreal* imAmps, long long int numTargAmps) { // decide the amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> numTargs; // kernel called on every 1 in 2^numTargs amplitudes if (thisTask>=numTasks) return; // find this task's start index (where all targs are 0) long long int ind00 = insertZeroBits(thisTask, targs, numTargs); // this task only modifies amplitudes if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; qreal *reVec = qureg.stateVec.real; qreal *imVec = qureg.stateVec.imag; /* each thread needs: long long int ampInds[numAmps]; qreal reAmps[numAmps]; qreal imAmps[numAmps]; but instead has access to shared arrays, with below stride and offset */ size_t stride = gridDim.x*blockDim.x; size_t offset = blockIdx.x*blockDim.x + threadIdx.x; // determine the indices and record values of target amps long long int ind; for (int i=0; i < numTargAmps; i++) { // get global index of current target qubit assignment ind = ind00; for (int t=0; t < numTargs; t++) if (extractBit(t, i)) ind = flipBit(ind, targs[t]); ampInds[i*stride+offset] = ind; reAmps [i*stride+offset] = reVec[ind]; imAmps [i*stride+offset] = imVec[ind]; } // update the amplitudes for (int r=0; r < numTargAmps; r++) { ind = ampInds[r*stride+offset]; reVec[ind] = 0; imVec[ind] = 0; for (int c=0; c < numTargAmps; c++) { qreal uReElem = uRe[c + r*numTargAmps]; qreal uImElem = uIm[c + r*numTargAmps]; reVec[ind] += reAmps[c*stride+offset]*uReElem - imAmps[c*stride+offset]*uImElem; imVec[ind] += reAmps[c*stride+offset]*uImElem + imAmps[c*stride+offset]*uReElem; } } } void statevec_multiControlledMultiQubitUnitary(Qureg qureg, long long int ctrlMask, int* targs, int numTargs, ComplexMatrixN u) { int threadsPerCUDABlock = 128; int CUDABlocks = 
ceil((qreal)(qureg.numAmpsPerChunk>>numTargs)/threadsPerCUDABlock); // allocate device space for global {targs} (length: numTargs) and populate int *d_targs; size_t targMemSize = numTargs * sizeof *d_targs; cudaMallocManaged(&d_targs, targMemSize); cudaMemcpy(d_targs, targs, targMemSize, cudaMemcpyHostToDevice); // flatten out the u.real and u.imag lists int uNumRows = (1 << u.numQubits); qreal* uReFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uReFlat); qreal* uImFlat = (qreal*) malloc(uNumRows*uNumRows * sizeof *uImFlat); long long int i = 0; for (int r=0; r < uNumRows; r++) for (int c=0; c < uNumRows; c++) { uReFlat[i] = u.real[r][c]; uImFlat[i] = u.imag[r][c]; i++; } // allocate device space for global u.real and u.imag (flatten by concatenating rows) and populate qreal* d_uRe; qreal* d_uIm; size_t uMemSize = uNumRows*uNumRows * sizeof *d_uRe; // size of each of d_uRe and d_uIm cudaMallocManaged(&d_uRe, uMemSize); cudaMallocManaged(&d_uIm, uMemSize); cudaMemcpy(d_uRe, uReFlat, uMemSize, cudaMemcpyHostToDevice); cudaMemcpy(d_uIm, uImFlat, uMemSize, cudaMemcpyHostToDevice); // allocate device Wspace for thread-local {ampInds}, {reAmps}, {imAmps} (length: 1<<numTargs) long long int *d_ampInds; qreal *d_reAmps; qreal *d_imAmps; size_t gridSize = (size_t) threadsPerCUDABlock * CUDABlocks; int numTargAmps = uNumRows; cudaMallocManaged(&d_ampInds, numTargAmps*gridSize * sizeof *d_ampInds); cudaMallocManaged(&d_reAmps, numTargAmps*gridSize * sizeof *d_reAmps); cudaMallocManaged(&d_imAmps, numTargAmps*gridSize * sizeof *d_imAmps); // call kernel statevec_multiControlledMultiQubitUnitaryKernel<<<CUDABlocks,threadsPerCUDABlock>>>( qureg, ctrlMask, d_targs, numTargs, d_uRe, d_uIm, d_ampInds, d_reAmps, d_imAmps, numTargAmps); // free kernel memory free(uReFlat); free(uImFlat); cudaFree(d_targs); cudaFree(d_uRe); cudaFree(d_uIm); cudaFree(d_ampInds); cudaFree(d_reAmps); cudaFree(d_imAmps); } __global__ void statevec_multiControlledTwoQubitUnitaryKernel(Qureg qureg, long long int ctrlMask, int q1, int q2, ArgMatrix4 u){ // decide the 4 amplitudes this thread will modify long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; long long int numTasks = qureg.numAmpsPerChunk >> 2; // kernel called on every 1 in 4 amplitudes if (thisTask>=numTasks) return; qreal *reVec = qureg.stateVec.real; qreal *imVec = qureg.stateVec.imag; // find indices of amplitudes to modify (treat q1 as the least significant bit) long long int ind00, ind01, ind10, ind11; ind00 = insertTwoZeroBits(thisTask, q1, q2); // modify only if control qubits are 1 for this state if (ctrlMask && (ctrlMask&ind00) != ctrlMask) return; ind01 = flipBit(ind00, q1); ind10 = flipBit(ind00, q2); ind11 = flipBit(ind01, q2); // extract statevec amplitudes qreal re00, re01, re10, re11; qreal im00, im01, im10, im11; re00 = reVec[ind00]; im00 = imVec[ind00]; re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; re11 = reVec[ind11]; im11 = imVec[ind11]; // apply u * {amp00, amp01, amp10, amp11} reVec[ind00] = u.r0c0.real*re00 - u.r0c0.imag*im00 + u.r0c1.real*re01 - u.r0c1.imag*im01 + u.r0c2.real*re10 - u.r0c2.imag*im10 + u.r0c3.real*re11 - u.r0c3.imag*im11; imVec[ind00] = u.r0c0.imag*re00 + u.r0c0.real*im00 + u.r0c1.imag*re01 + u.r0c1.real*im01 + u.r0c2.imag*re10 + u.r0c2.real*im10 + u.r0c3.imag*re11 + u.r0c3.real*im11; reVec[ind01] = u.r1c0.real*re00 - u.r1c0.imag*im00 + u.r1c1.real*re01 - u.r1c1.imag*im01 + u.r1c2.real*re10 - u.r1c2.imag*im10 + u.r1c3.real*re11 - u.r1c3.imag*im11; imVec[ind01] = u.r1c0.imag*re00 + 
u.r1c0.real*im00 + u.r1c1.imag*re01 + u.r1c1.real*im01 + u.r1c2.imag*re10 + u.r1c2.real*im10 + u.r1c3.imag*re11 + u.r1c3.real*im11; reVec[ind10] = u.r2c0.real*re00 - u.r2c0.imag*im00 + u.r2c1.real*re01 - u.r2c1.imag*im01 + u.r2c2.real*re10 - u.r2c2.imag*im10 + u.r2c3.real*re11 - u.r2c3.imag*im11; imVec[ind10] = u.r2c0.imag*re00 + u.r2c0.real*im00 + u.r2c1.imag*re01 + u.r2c1.real*im01 + u.r2c2.imag*re10 + u.r2c2.real*im10 + u.r2c3.imag*re11 + u.r2c3.real*im11; reVec[ind11] = u.r3c0.real*re00 - u.r3c0.imag*im00 + u.r3c1.real*re01 - u.r3c1.imag*im01 + u.r3c2.real*re10 - u.r3c2.imag*im10 + u.r3c3.real*re11 - u.r3c3.imag*im11; imVec[ind11] = u.r3c0.imag*re00 + u.r3c0.real*im00 + u.r3c1.imag*re01 + u.r3c1.real*im01 + u.r3c2.imag*re10 + u.r3c2.real*im10 + u.r3c3.imag*re11 + u.r3c3.real*im11; } void statevec_multiControlledTwoQubitUnitary(Qureg qureg, long long int ctrlMask, int q1, int q2, ComplexMatrix4 u) { int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); // one kernel eval for every 4 amplitudes statevec_multiControlledTwoQubitUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, ctrlMask, q1, q2, argifyMatrix4(u)); } __global__ void statevec_controlledUnitaryKernel(Qureg qureg, int controlQubit, int targetQubit, ArgMatrix2 u){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; int controlBit; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_controlledUnitary(Qureg qureg, int controlQubit, int targetQubit, ComplexMatrix2 u) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_controlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, argifyMatrix2(u)); } __global__ void statevec_multiControlledUnitaryKernel( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ArgMatrix2 u ){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; if (ctrlQubitsMask == (ctrlQubitsMask & (indexUp ^ ctrlFlipMask))) { // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; } } void statevec_multiControlledUnitary( Qureg qureg, long long int ctrlQubitsMask, long long int ctrlFlipMask, int targetQubit, ComplexMatrix2 u ){ int threadsPerCUDABlock = 128; int CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_multiControlledUnitaryKernel<<<CUDABlocks, threadsPerCUDABlock>>>( qureg, ctrlQubitsMask, ctrlFlipMask, targetQubit, argifyMatrix2(u)); } __global__ void statevec_pauliXKernel(Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! 
// fix -- not necessary for GPU version
    qreal *stateVecReal = qureg.stateVec.real;
    qreal *stateVecImag = qureg.stateVec.imag;

    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;

    thisBlock = thisTask / sizeHalfBlock;
    indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    indexLo   = indexUp + sizeHalfBlock;

    // store current state vector values in temp variables
    stateRealUp = stateVecReal[indexUp];
    stateImagUp = stateVecImag[indexUp];

    stateVecReal[indexUp] = stateVecReal[indexLo];
    stateVecImag[indexUp] = stateVecImag[indexLo];

    stateVecReal[indexLo] = stateRealUp;
    stateVecImag[indexLo] = stateImagUp;
}

void statevec_pauliX(Qureg qureg, int targetQubit)
{
    int threadsPerCUDABlock, CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
    statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
}

__global__ void statevec_pauliYKernel(Qureg qureg, int targetQubit, int conjFac){

    long long int sizeHalfBlock = 1LL << targetQubit;
    long long int sizeBlock     = 2LL * sizeHalfBlock;
    long long int numTasks      = qureg.numAmpsPerChunk >> 1;
    long long int thisTask      = blockIdx.x*blockDim.x + threadIdx.x;
    if (thisTask>=numTasks) return;

    long long int thisBlock = thisTask / sizeHalfBlock;
    long long int indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
    long long int indexLo   = indexUp + sizeHalfBlock;

    qreal stateRealUp, stateImagUp;

    qreal *stateVecReal = qureg.stateVec.real;
    qreal *stateVecImag = qureg.stateVec.imag;
    stateRealUp = stateVecReal[indexUp];
    stateImagUp = stateVecImag[indexUp];

    // update under +-{{0, -i}, {i, 0}}
    stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
    stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
    stateVecReal[indexLo] = conjFac * -stateImagUp;
    stateVecImag[indexLo] = conjFac * stateRealUp;
}

void statevec_pauliY(Qureg qureg, int targetQubit)
{
    int threadsPerCUDABlock, CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
    statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1);
}

void statevec_pauliYConj(Qureg qureg, int targetQubit)
{
    int threadsPerCUDABlock, CUDABlocks;
    threadsPerCUDABlock = 128;
    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
    statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1);
}

__global__ void statevec_controlledPauliYKernel(Qureg qureg, int controlQubit, int targetQubit, int conjFac)
{
    long long int index;
    long long int sizeBlock, sizeHalfBlock;
    long long int stateVecSize;
    int controlBit;

    qreal stateRealUp, stateImagUp;
    long long int thisBlock, indexUp, indexLo;
    sizeHalfBlock = 1LL << targetQubit;
    sizeBlock     = 2LL * sizeHalfBlock;

    stateVecSize = qureg.numAmpsPerChunk;
    qreal *stateVecReal = qureg.stateVec.real;
    qreal *stateVecImag = qureg.stateVec.imag;

    index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index>=(stateVecSize>>1)) return;
    thisBlock = index / sizeHalfBlock;
    indexUp = thisBlock*sizeBlock + index%sizeHalfBlock;
    indexLo = indexUp + sizeHalfBlock;

    controlBit = extractBit(controlQubit, indexUp);
    if (controlBit){

        stateRealUp = stateVecReal[indexUp];
        stateImagUp = stateVecImag[indexUp];

        // update under +-{{0, -i}, {i, 0}}
        stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
        stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
        stateVecReal[indexLo] = conjFac * -stateImagUp;
        stateVecImag[indexLo] = conjFac * stateRealUp;
    }
}

void statevec_controlledPauliY(Qureg qureg, int controlQubit, int targetQubit)
{ int conjFactor = 1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor); } void statevec_controlledPauliYConj(Qureg qureg, int controlQubit, int targetQubit) { int conjFactor = -1; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor); } __global__ void statevec_phaseShiftByTermKernel(Qureg qureg, int targetQubit, qreal cosAngle, qreal sinAngle) { long long int sizeBlock, sizeHalfBlock; long long int thisBlock, indexUp,indexLo; qreal stateRealLo, stateImagLo; long long int thisTask; long long int numTasks = qureg.numAmpsPerChunk >> 1; sizeHalfBlock = 1LL << targetQubit; sizeBlock = 2LL * sizeHalfBlock; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo; } void statevec_phaseShiftByTerm(Qureg qureg, int targetQubit, Complex term) { qreal cosAngle = term.real; qreal sinAngle = term.imag; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle); } __global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, int idQubit1, int idQubit2, qreal cosAngle, qreal sinAngle) { long long int index; long long int stateVecSize; int bit1, bit2; qreal stateRealLo, stateImagLo; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_controlledPhaseShift(Qureg qureg, int idQubit1, int idQubit2, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle); } __global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { qreal stateRealLo, stateImagLo; long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateRealLo = stateVecReal[index]; stateImagLo = stateVecImag[index]; 
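        // all qubits in mask are 1 for this basis state: multiply its amplitude
        // by the phase factor cosAngle + i*sinAngle (= exp(i*angle))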
stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo; stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo; } } void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle) { qreal cosAngle = cos(angle); qreal sinAngle = sin(angle); long long int mask = getQubitBitMask(controlQubits, numControlQubits); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle); } __global__ void statevec_multiRotateZKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) { long long int stateVecSize = qureg.numAmpsPerChunk; long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; int fac = getBitMaskParity(mask & index)? -1 : 1; qreal stateReal = stateVecReal[index]; qreal stateImag = stateVecImag[index]; stateVecReal[index] = cosAngle*stateReal + fac * sinAngle*stateImag; stateVecImag[index] = - fac * sinAngle*stateReal + cosAngle*stateImag; } void statevec_multiRotateZ(Qureg qureg, long long int mask, qreal angle) { qreal cosAngle = cos(angle/2.0); qreal sinAngle = sin(angle/2.0); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiRotateZKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle); } qreal densmatr_calcTotalProb(Qureg qureg) { // computes the trace using Kahan summation qreal pTotal=0; qreal y, t, c; c = 0; long long int numCols = 1LL << qureg.numQubitsRepresented; long long diagIndex; //copyStateFromGPU(qureg); for (int col=0; col< numCols; col++) { diagIndex = col*(numCols + 1); y = qureg.stateVec.real[diagIndex] - c; t = pTotal + y; c = ( t - pTotal ) - y; // brackets are important pTotal = t; } return pTotal; } qreal statevec_calcTotalProb(Qureg qureg){ /* IJB - implemented using Kahan summation for greater accuracy at a slight floating point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */ /* Don't change the bracketing in this routine! 
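       The nested brackets force c = ( t - pTotal ) - y to be computed from the rounded
       intermediate results; if the expression were re-associated it would simplify
       algebraically to zero and the Kahan error compensation would be silently lost.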
*/ qreal pTotal=0; qreal y, t, c; long long int index; long long int numAmpsPerRank = qureg.numAmpsPerChunk; //copyStateFromGPU(qureg); cudaDeviceSynchronize(); c = 0.0; for (index=0; index<numAmpsPerRank; index++){ /* Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan */ // pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; /* Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan */ //pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c; t = pTotal + y; c = ( t - pTotal ) - y; pTotal = t; } return pTotal; } __global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, int idQubit1, int idQubit2) { long long int index; long long int stateVecSize; int bit1, bit2; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; bit1 = extractBit (idQubit1, index); bit2 = extractBit (idQubit2, index); if (bit1 && bit2) { stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_controlledPhaseFlip(Qureg qureg, int idQubit1, int idQubit2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2); } __global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask) { long long int index; long long int stateVecSize; stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=stateVecSize) return; if (mask == (mask & index) ){ stateVecReal [index] = - stateVecReal [index]; stateVecImag [index] = - stateVecImag [index]; } } void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits) { int threadsPerCUDABlock, CUDABlocks; long long int mask = getQubitBitMask(controlQubits, numControlQubits); threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask); } __global__ void statevec_swapQubitAmpsKernel(Qureg qureg, int qb1, int qb2) { qreal *reVec = qureg.stateVec.real; qreal *imVec = qureg.stateVec.imag; long long int numTasks = qureg.numAmpsPerChunk >> 2; // each iteration updates 2 amps and skips 2 amps long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; long long int ind00, ind01, ind10; qreal re01, re10, im01, im10; // determine ind00 of |..0..0..>, |..0..1..> and |..1..0..> ind00 = insertTwoZeroBits(thisTask, qb1, qb2); ind01 = flipBit(ind00, qb1); ind10 = flipBit(ind00, qb2); // extract statevec amplitudes re01 = reVec[ind01]; im01 = imVec[ind01]; re10 = reVec[ind10]; im10 = imVec[ind10]; // swap 01 and 10 amps reVec[ind01] = re10; reVec[ind10] = re01; imVec[ind01] = im10; imVec[ind10] = im01; } void statevec_swapQubitAmps(Qureg qureg, int qb1, int qb2) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>2)/threadsPerCUDABlock); statevec_swapQubitAmpsKernel<<<CUDABlocks, 
threadsPerCUDABlock>>>(qureg, qb1, qb2); } __global__ void statevec_hadamardKernel (Qureg qureg, int targetQubit){ // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block // ----- temp variables qreal stateRealUp,stateRealLo, // storage for previous state values stateImagUp,stateImagLo; // (used in updates) // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks // ---------------------------------------------------------------- // // rotate // // ---------------------------------------------------------------- // //! fix -- no necessary for GPU version qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; qreal recRoot2 = 1.0/sqrt(2.0); thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; // store current state vector values in temp variables stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateRealLo = stateVecReal[indexLo]; stateImagLo = stateVecImag[indexLo]; stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo); stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo); stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo); stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo); } void statevec_hadamard(Qureg qureg, int targetQubit) { printf("statevec_hadamard\n"); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); cudaDeviceSynchronize(); statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit); } __global__ void statevec_controlledNotKernel(Qureg qureg, int controlQubit, int targetQubit) { long long int index; long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved long long int stateVecSize; int controlBit; // ----- temp variables qreal stateRealUp, // storage for previous state values stateImagUp; // (used in updates) long long int thisBlock, // current block indexUp,indexLo; // current index and corresponding index in lower half block sizeHalfBlock = 1LL << targetQubit; // size of blocks halved sizeBlock = 2LL * sizeHalfBlock; // size of blocks stateVecSize = qureg.numAmpsPerChunk; qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; index = blockIdx.x*blockDim.x + threadIdx.x; if (index>=(stateVecSize>>1)) return; thisBlock = index / sizeHalfBlock; indexUp = thisBlock*sizeBlock + index%sizeHalfBlock; indexLo = indexUp + sizeHalfBlock; controlBit = extractBit(controlQubit, indexUp); if (controlBit){ stateRealUp = stateVecReal[indexUp]; stateImagUp = stateVecImag[indexUp]; stateVecReal[indexUp] = stateVecReal[indexLo]; stateVecImag[indexUp] = stateVecImag[indexLo]; stateVecReal[indexLo] = stateRealUp; stateVecImag[indexLo] = stateImagUp; } } void statevec_controlledNot(Qureg qureg, int controlQubit, int targetQubit) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_controlledNotKernel<<<CUDABlocks, 
threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit); } __device__ __host__ unsigned int log2Int( unsigned int x ) { unsigned int ans = 0 ; while( x>>=1 ) ans++; return ans ; } __device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){ int i, l, r; int threadMax, maxDepth; threadMax = length/2; maxDepth = log2Int(length/2); for (i=0; i<maxDepth+1; i++){ if (threadIdx.x<threadMax){ l = threadIdx.x; r = l + threadMax; arrayIn[l] = arrayIn[r] + arrayIn[l]; } threadMax = threadMax >> 1; __syncthreads(); // optimise -- use warp shuffle instead } if (threadIdx.x==0) reducedArray[blockIdx.x] = arrayIn[0]; } __global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){ extern __shared__ qreal tempReductionArray[]; int blockOffset = blockIdx.x*length; tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2]; tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1]; __syncthreads(); reduceBlock(tempReductionArray, reducedArray, length); } __global__ void densmatr_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // run by each thread // use of block here refers to contiguous amplitudes where measureQubit = 0, // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numTasks = densityDim >> 1; long long int sizeHalfBlock = 1LL << (measureQubit); long long int sizeBlock = 2LL * sizeHalfBlock; long long int thisBlock; // which block this thread is processing long long int thisTask; // which part of the block this thread is processing long long int basisIndex; // index of this thread's computational basis state long long int densityIndex; // " " index of |basis><basis| in the flat density matrix // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; // figure out which density matrix prob that this thread is assigned thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; densityIndex = (densityDim + 1) * basisIndex; // record the probability in the CUDA-BLOCK-wide array qreal prob = qureg.stateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 tempReductionArray[threadIdx.x] = prob; // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } __global__ void statevec_findProbabilityOfZeroKernel( Qureg qureg, int measureQubit, qreal *reducedArray ) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity long long int numTasks=qureg.numAmpsPerChunk>>1; // (good for shared memory parallelism) extern __shared__ qreal tempReductionArray[]; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // 
---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; qreal realVal, imagVal; realVal = stateVecReal[index]; imagVal = stateVecImag[index]; tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; __syncthreads(); if (threadIdx.x<blockDim.x/2){ reduceBlock(tempReductionArray, reducedArray, blockDim.x); } } int getNumReductionLevels(long long int numValuesToReduce, int numReducedPerLevel){ int levels=0; while (numValuesToReduce){ numValuesToReduce = numValuesToReduce/numReducedPerLevel; levels++; } return levels; } void swapDouble(qreal **a, qreal **b){ qreal *temp; temp = *a; *a = *b; *b = temp; } qreal densmatr_findProbabilityOfZero(Qureg qureg, int measureQubit) { long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim >> 1; // half of the diagonal has measureQubit=0 int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { densmatr_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, measureQubit, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal zeroProb; cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return zeroProb; } qreal statevec_findProbabilityOfZero(Qureg qureg, int measureQubit) { long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; qreal stateProb=0; int firstTime=1; int maxReducedPerLevel = REDUCE_SHARED_SIZE; while(numValuesToReduce>1){ if (numValuesToReduce<maxReducedPerLevel){ // Need less than one CUDA block to reduce values valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { // Use full CUDA blocks, with block size constrained by shared mem usage valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime){ statevec_findProbabilityOfZeroKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, measureQubit, qureg.firstLevelReduction); firstTime=0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, 
valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return stateProb; } qreal statevec_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } qreal densmatr_calcProbOfOutcome(Qureg qureg, int measureQubit, int outcome) { qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); if (outcome==1) outcomeProb = 1.0 - outcomeProb; return outcomeProb; } /** computes Tr(conjTrans(a) b) = sum of (a_ij^* b_ij), which is a real number */ __global__ void densmatr_calcInnerProductKernel( Qureg a, Qureg b, long long int numTermsToSum, qreal* reducedArray ) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // Re{ conj(a) b } = Re{ (aRe - i aIm)(bRe + i bIm) } = aRe bRe + aIm bIm qreal prod = ( a.stateVec.real[index]*b.stateVec.real[index] + a.stateVec.imag[index]*b.stateVec.imag[index]); // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = prod; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcInnerProduct(Qureg a, Qureg b) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = a.numAmpsTotal; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the terms in each block // arbitrarily store the reduction in the b qureg's array if (firstTime) { densmatr_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( a, b, a.numAmpsTotal, b.firstLevelReduction); firstTime = 0; } // sum the block terms else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( b.firstLevelReduction, b.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(b.firstLevelReduction), &(b.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal innerprod; cudaMemcpy(&innerprod, b.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return innerprod; } /** computes either a real or imag term in the inner product */ __global__ void statevec_calcInnerProductKernel( int getRealComp, qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; // choose whether to calculate the real or imaginary term of the inner product qreal innerProdTerm; if (getRealComp) innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; else 
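// imaginary component of <bra|ket> = sum_i conj(bra_i)*ket_i:  Im = braRe*ketIm - braIm*ketRe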
innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = innerProdTerm; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Terrible code which unnecessarily individually computes and sums the real and imaginary components of the * inner product, so as to not have to worry about keeping the sums separated during reduction. * Truly disgusting, probably doubles runtime, please fix. * @TODO could even do the kernel twice, storing real in bra.reduc and imag in ket.reduc? */ Complex statevec_calcInnerProduct(Qureg bra, Qureg ket) { qreal innerProdReal, innerProdImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, bra.stateVec.real, bra.stateVec.imag, ket.stateVec.real, ket.stateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = bra.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcInnerProductKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, bra.stateVec.real, bra.stateVec.imag, ket.stateVec.real, ket.stateVec.imag, numValuesToReduce, bra.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( bra.firstLevelReduction, bra.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex innerProd; innerProd.real = innerProdReal; innerProd.imag = innerProdImag; return innerProd; } /** computes one term of (vec^*T) dens * vec */ __global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { // figure 
out which density matrix row to consider long long int col; long long int row = blockIdx.x*blockDim.x + threadIdx.x; if (row >= dim) return; qreal* densReal = dens.stateVec.real; qreal* densImag = dens.stateVec.imag; qreal* vecReal = vec.stateVec.real; qreal* vecImag = vec.stateVec.imag; // compute the row-th element of the product dens*vec qreal prodReal = 0; qreal prodImag = 0; for (col=0LL; col < dim; col++) { qreal densElemReal = densReal[dim*col + row]; qreal densElemImag = densImag[dim*col + row]; prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; } // multiply with row-th elem of (vec^*) qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = termReal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } qreal densmatr_calcFidelity(Qureg qureg, Qureg pureState) { // we're summing the square of every term in the density matrix long long int densityDim = 1LL << qureg.numQubitsRepresented; long long int numValuesToReduce = densityDim; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block // store the reduction in the pureState array if (firstTime) { densmatr_calcFidelityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg, pureState, densityDim, pureState.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( pureState.firstLevelReduction, pureState.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal fidelity; cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return fidelity; } __global__ void densmatr_calcHilbertSchmidtDistanceSquaredKernel( qreal* aRe, qreal* aIm, qreal* bRe, qreal* bIm, long long int numAmpsToSum, qreal *reducedArray ) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; // compute this thread's sum term qreal difRe = aRe[index] - bRe[index]; qreal difIm = aIm[index] - bIm[index]; qreal term = difRe*difRe + difIm*difIm; // array of each thread's collected term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /* computes sqrt(Tr( (a-b) conjTrans(a-b) ) = sqrt( sum of abs vals 
of (a-b)) */ qreal densmatr_calcHilbertSchmidtDistance(Qureg a, Qureg b) { // we're summing the square of every term in (a-b) long long int numValuesToReduce = a.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block (store reduction temp values in a's reduction array) if (firstTime) { densmatr_calcHilbertSchmidtDistanceSquaredKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( a.stateVec.real, a.stateVec.imag, b.stateVec.real, b.stateVec.imag, numValuesToReduce, a.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( a.firstLevelReduction, a.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(a.firstLevelReduction), &(a.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal trace; cudaMemcpy(&trace, a.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); qreal sqrtTrace = sqrt(trace); return sqrtTrace; } __global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { // figure out which density matrix term this thread is assigned long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numAmpsToSum) return; qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // array of each thread's collected probability, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = term; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } /** Computes the trace of the density matrix squared */ qreal densmatr_calcPurity(Qureg qureg) { // we're summing the square of every term in the density matrix long long int numValuesToReduce = qureg.numAmpsPerChunk; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel = REDUCE_SHARED_SIZE; int firstTime = 1; while (numValuesToReduce > 1) { // need less than one CUDA-BLOCK to reduce if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } // otherwise use only full CUDA-BLOCKS else { valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } // dictates size of reduction array sharedMemSize = valuesPerCUDABlock*sizeof(qreal); // spawn threads to sum the probs in each block if (firstTime) { densmatr_calcPurityKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( qureg.stateVec.real, qureg.stateVec.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; // sum the block probs } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); 
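// ping-pong the two reduction buffers so the next level reads the partial sums just written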
swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } qreal traceDensSquared; cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); return traceDensSquared; } __global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) { // ----- sizes long long int sizeBlock, // size of blocks sizeHalfBlock; // size of blocks halved // ----- indices long long int thisBlock, // current block index; // current index for first half block // ----- measured probability qreal renorm; // probability (returned) value // ----- temp variables long long int thisTask; // task based approach for expose loop with small granularity // (good for shared memory parallelism) long long int numTasks=qureg.numAmpsPerChunk>>1; // ---------------------------------------------------------------- // // dimensions // // ---------------------------------------------------------------- // sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, // and then the number to skip sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) // ---------------------------------------------------------------- // // find probability // // ---------------------------------------------------------------- // // // --- task-based shared-memory parallel implementation // renorm=1/sqrt(totalProbability); qreal *stateVecReal = qureg.stateVec.real; qreal *stateVecImag = qureg.stateVec.imag; thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask>=numTasks) return; thisBlock = thisTask / sizeHalfBlock; index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; if (outcome==0){ stateVecReal[index]=stateVecReal[index]*renorm; stateVecImag[index]=stateVecImag[index]*renorm; stateVecReal[index+sizeHalfBlock]=0; stateVecImag[index+sizeHalfBlock]=0; } else if (outcome==1){ stateVecReal[index]=0; stateVecImag[index]=0; stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; } } /* * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or * else the state-vector will lose normalisation */ void statevec_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); statevec_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, measureQubit, outcome, outcomeProb); } /** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ __global__ void densmatr_collapseToKnownProbOutcomeKernel( qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, long long int part1, long long int part2, long long int part3, long long int rowBit, long long int colBit, long long int desired, long long int undesired) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numBasesToVisit) return; long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); // renormalise desired outcome vecReal[base + desired] /= outcomeProb; vecImag[base + desired] /= outcomeProb; // kill undesired outcome vecReal[base + undesired] = 0; vecImag[base + undesired] = 0; // kill |..0..><..1..| states vecReal[base + colBit] = 0; vecImag[base + colBit] = 0; 
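// also zero the mirrored |..1..><..0..| off-diagonal entry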
vecReal[base + rowBit] = 0; vecImag[base + rowBit] = 0; } /** This involves finding |...i...><...j...| states and killing those where i!=j */ void densmatr_collapseToKnownProbOutcome(Qureg qureg, int measureQubit, int outcome, qreal outcomeProb) { int rowQubit = measureQubit + qureg.numQubitsRepresented; int colBit = 1LL << measureQubit; int rowBit = 1LL << rowQubit; long long int numBasesToVisit = qureg.numAmpsPerChunk/4; long long int part1 = colBit -1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numBasesToVisit - (rowBit >> 1); long long int desired, undesired; if (outcome == 0) { desired = 0; undesired = colBit | rowBit; } else { desired = colBit | rowBit; undesired = 0; } int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); densmatr_collapseToKnownProbOutcomeKernel<<<CUDABlocks, threadsPerCUDABlock>>>( outcomeProb, qureg.stateVec.real, qureg.stateVec.imag, numBasesToVisit, part1, part2, part3, rowBit, colBit, desired, undesired); } __global__ void densmatr_mixDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; if (ampInd >= numAmpsToVisit) return; combineQureg.stateVec.real[ampInd] *= 1-otherProb; combineQureg.stateVec.imag[ampInd] *= 1-otherProb; combineQureg.stateVec.real[ampInd] += otherProb*otherQureg.stateVec.real[ampInd]; combineQureg.stateVec.imag[ampInd] += otherProb*otherQureg.stateVec.imag[ampInd]; } void densmatr_mixDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDensityMatrixKernel<<<CUDABlocks, threadsPerCUDABlock>>>( combineQureg, otherProb, otherQureg, numAmpsToVisit ); } /** Called once for every 4 amplitudes in density matrix * Works by establishing the |..0..><..0..| state (for its given index) then * visiting |..1..><..0..| and |..0..><..1..|. 
Labels |part1 X pa><rt2 NOT(X) part3| * From the brain of Simon Benjamin */ __global__ void densmatr_mixDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int colBit, long long int rowBit) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); vecReal[ampInd + colBit] *= fac; vecImag[ampInd + colBit] *= fac; vecReal[ampInd + rowBit] *= fac; vecImag[ampInd + rowBit] *= fac; } void densmatr_oneQubitDegradeOffDiagonal(Qureg qureg, int targetQubit, qreal dephFac) { long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( dephFac, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, colBit, rowBit); } void densmatr_mixDephasing(Qureg qureg, int targetQubit, qreal dephase) { if (dephase == 0) return; qreal dephFac = 1 - dephase; densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephFac); } /** Called 12 times for every 16 amplitudes in density matrix * Each sums from the |..0..0..><..0..0..| index to visit either * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par><t3 0 part4 0 part5|. * From the brain of Simon Benjamin */ __global__ void densmatr_mixTwoQubitDephasingKernel( qreal fac, qreal* vecReal, qreal *vecImag, long long int numBackgroundStates, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int colBit1, long long int rowBit1, long long int colBit2, long long int rowBit2) { long long int outerInd = blockIdx.x*blockDim.x + threadIdx.x; if (outerInd >= numAmpsToVisit) return; // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| int meta = 1 + (outerInd/numBackgroundStates); if (meta > 4) meta++; if (meta > 9) meta++; long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); long long int scanInd = outerInd % numBackgroundStates; long long int stateInd = ( shift + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); vecReal[stateInd] *= fac; vecImag[stateInd] *= fac; } // @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? 
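/* Editor's note -- illustrative sketch, not part of the original QuEST source.
 * The part1/part2/part3 (and part4/part5) masks used throughout these density
 * matrix kernels implement "insert a 0 bit at a fixed position" arithmetic:
 * splitting a dense scan index around the target bit position and shifting the
 * upper half left by one enumerates exactly the basis indices whose target bit
 * is 0. A hypothetical helper showing the single-bit case: */
__host__ __device__ static inline long long int insertZeroBit(long long int scanInd, int k)
{
    long long int low  = scanInd & ((1LL << k) - 1);  // bits below position k stay in place
    long long int high = scanInd - low;               // bits at or above position k
    return (high << 1) | low;                         // shift upper bits up; bit k is left clear
}
/* The kernels chain two (or, for the two-qubit routines, four) such insertions
 * in a single expression, e.g. (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2)
 * inserts zeros at both the column-bit and row-bit positions of the flattened
 * density matrix index. */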
void densmatr_mixTwoQubitDephasing(Qureg qureg, int qubit1, int qubit2, qreal dephase) { if (dephase == 0) return; // assumes qubit2 > qubit1 int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); qreal dephFac = 1 - dephase; // refers to states |a 0 b 0 c><d 0 e 0 f| (target qubits are fixed) long long int numBackgroundStates = qureg.numAmpsPerChunk/16; // 12 of these states experience dephasing long long int numAmpsToVisit = 12 * numBackgroundStates; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixTwoQubitDephasingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( dephFac, qureg.stateVec.real, qureg.stateVec.imag, numBackgroundStates, numAmpsToVisit, part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); vecReal[baseInd] *= 1 - depolLevel; vecImag[baseInd] *= 1 - depolLevel; vecReal[targetInd] *= 1 - depolLevel; vecImag[targetInd] *= 1 - depolLevel; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; vecReal[targetInd] += realAvDepol; vecImag[targetInd] += imagAvDepol; } /** Works like mixDephasing but modifies every other element, and elements are averaged in pairs */ __global__ void densmatr_mixDampingKernel( qreal damping, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int bothBits) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); long long int targetInd = baseInd + bothBits; qreal realAvDepol = damping * ( vecReal[targetInd]); qreal imagAvDepol = damping * ( vecImag[targetInd]); vecReal[targetInd] *= 1 - damping; vecImag[targetInd] *= 1 - damping; vecReal[baseInd] += realAvDepol; vecImag[baseInd] += imagAvDepol; } void densmatr_mixDepolarising(Qureg qureg, int targetQubit, qreal depolLevel) { if (depolLevel == 0) return; densmatr_mixDephasing(qureg, targetQubit, depolLevel); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - 
colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( depolLevel, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } void densmatr_mixDamping(Qureg qureg, int targetQubit, qreal damping) { if (damping == 0) return; qreal dephase = sqrt(1-damping); densmatr_oneQubitDegradeOffDiagonal(qureg, targetQubit, dephase); long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; int rowQubit = targetQubit + qureg.numQubitsRepresented; long long int colBit = 1LL << targetQubit; long long int rowBit = 1LL << rowQubit; long long int bothBits = colBit | rowBit; long long int part1 = colBit - 1; long long int part2 = (rowBit >> 1) - colBit; long long int part3 = numAmpsToVisit - (rowBit >> 1); int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixDampingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( damping, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, bothBits); } /** Called once for every 16 amplitudes */ __global__ void densmatr_mixTwoQubitDepolarisingKernel( qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, long long int part1, long long int part2, long long int part3, long long int part4, long long int part5, long long int rowCol1, long long int rowCol2) { long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; if (scanInd >= numAmpsToVisit) return; // index of |..0..0..><..0..0| long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); long long int ind01 = ind00 + rowCol1; long long int ind10 = ind00 + rowCol2; long long int ind11 = ind00 + rowCol1 + rowCol2; qreal realAvDepol = depolLevel * 0.25 * ( vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); qreal imagAvDepol = depolLevel * 0.25 * ( vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); qreal retain = 1 - depolLevel; vecReal[ind00] *= retain; vecImag[ind00] *= retain; vecReal[ind01] *= retain; vecImag[ind01] *= retain; vecReal[ind10] *= retain; vecImag[ind10] *= retain; vecReal[ind11] *= retain; vecImag[ind11] *= retain; vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; } void densmatr_mixTwoQubitDepolarising(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { if (depolLevel == 0) return; // assumes qubit2 > qubit1 densmatr_mixTwoQubitDephasing(qureg, qubit1, qubit2, depolLevel); int rowQubit1 = qubit1 + qureg.numQubitsRepresented; int rowQubit2 = qubit2 + qureg.numQubitsRepresented; long long int colBit1 = 1LL << qubit1; long long int rowBit1 = 1LL << rowQubit1; long long int colBit2 = 1LL << qubit2; long long int rowBit2 = 1LL << rowQubit2; long long int rowCol1 = colBit1 | rowBit1; long long int rowCol2 = colBit2 | rowBit2; long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; long long int part1 = colBit1 - 1; long long int part2 = (colBit2 >> 1) - colBit1; long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); long long int part5 = numAmpsToVisit - (rowBit2 >> 3); int 
threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); densmatr_mixTwoQubitDepolarisingKernel<<<CUDABlocks, threadsPerCUDABlock>>>( depolLevel, qureg.stateVec.real, qureg.stateVec.imag, numAmpsToVisit, part1, part2, part3, part4, part5, rowCol1, rowCol2); } __global__ void statevec_setWeightedQuregKernel(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; long long int numAmpsToVisit = qureg1.numAmpsPerChunk; if (ampInd >= numAmpsToVisit) return; qreal *vecRe1 = qureg1.stateVec.real; qreal *vecIm1 = qureg1.stateVec.imag; qreal *vecRe2 = qureg2.stateVec.real; qreal *vecIm2 = qureg2.stateVec.imag; qreal *vecReOut = out.stateVec.real; qreal *vecImOut = out.stateVec.imag; qreal facRe1 = fac1.real; qreal facIm1 = fac1.imag; qreal facRe2 = fac2.real; qreal facIm2 = fac2.imag; qreal facReOut = facOut.real; qreal facImOut = facOut.imag; qreal re1,im1, re2,im2, reOut,imOut; long long int index = ampInd; re1 = vecRe1[index]; im1 = vecIm1[index]; re2 = vecRe2[index]; im2 = vecIm2[index]; reOut = vecReOut[index]; imOut = vecImOut[index]; vecReOut[index] = (facReOut*reOut - facImOut*imOut) + (facRe1*re1 - facIm1*im1) + (facRe2*re2 - facIm2*im2); vecImOut[index] = (facReOut*imOut + facImOut*reOut) + (facRe1*im1 + facIm1*re1) + (facRe2*im2 + facIm2*re2); } void statevec_setWeightedQureg(Complex fac1, Qureg qureg1, Complex fac2, Qureg qureg2, Complex facOut, Qureg out) { long long int numAmpsToVisit = qureg1.numAmpsPerChunk; int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); statevec_setWeightedQuregKernel<<<CUDABlocks, threadsPerCUDABlock>>>( fac1, qureg1, fac2, qureg2, facOut, out ); } __global__ void statevec_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.stateVec.real; qreal* stateIm = qureg.stateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask]; qreal d = opIm[thisTask]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void statevec_applyDiagonalOp(Qureg qureg, DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); statevec_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op); } __global__ void densmatr_applyDiagonalOpKernel(Qureg qureg, DiagonalOp op) { // each thread modifies one value; a wasteful and inefficient strategy long long int numTasks = qureg.numAmpsPerChunk; long long int thisTask = blockIdx.x*blockDim.x + threadIdx.x; if (thisTask >= numTasks) return; qreal* stateRe = qureg.stateVec.real; qreal* stateIm = qureg.stateVec.imag; qreal* opRe = op.deviceOperator.real; qreal* opIm = op.deviceOperator.imag; int opDim = (1 << op.numQubits); qreal a = stateRe[thisTask]; qreal b = stateIm[thisTask]; qreal c = opRe[thisTask % opDim]; qreal d = opIm[thisTask % opDim]; // (a + b i)(c + d i) = (a c - b d) + i (a d + b c) stateRe[thisTask] = a*c - b*d; stateIm[thisTask] = a*d + b*c; } void densmatr_applyDiagonalOp(Qureg qureg, 
DiagonalOp op) { int threadsPerCUDABlock, CUDABlocks; threadsPerCUDABlock = 128; CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); densmatr_applyDiagonalOpKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, op); } /** computes either a real or imag term of |vec_i|^2 op_i */ __global__ void statevec_calcExpecDiagonalOpKernel( int getRealComp, qreal* vecReal, qreal* vecImag, qreal* opReal, qreal* opImag, long long int numTermsToSum, qreal* reducedArray) { long long int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= numTermsToSum) return; qreal vecAbs = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; // choose whether to calculate the real or imaginary term of the expec term qreal expecVal; if (getRealComp) expecVal = vecAbs * opReal[index]; else expecVal = vecAbs * opImag[index]; // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = expecVal; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex statevec_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) */ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { statevec_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, 
qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } __global__ void densmatr_calcExpecDiagonalOpKernel( int getRealComp, qreal* matReal, qreal* matImag, qreal* opReal, qreal* opImag, int numQubits, long long int numTermsToSum, qreal* reducedArray) { /** if the thread represents a diagonal op, then it computes either a * real or imag term of matr_{ii} op_i. Otherwise, it writes a 0 to the * reduction array */ // index will identy one of the 2^Q diagonals to be summed long long int matInd = blockIdx.x*blockDim.x + threadIdx.x; if (matInd >= numTermsToSum) return; long long int diagSpacing = (1LL << numQubits) + 1LL; int isDiag = ((matInd % diagSpacing) == 0); long long int opInd = matInd / diagSpacing; qreal val = 0; if (isDiag) { qreal matRe = matReal[matInd]; qreal matIm = matImag[matInd]; qreal opRe = opReal[opInd]; qreal opIm = opImag[opInd]; // (matRe + matIm i)(opRe + opIm i) = // (matRe opRe - matIm opIm) + i (matRe opIm + matIm opRe) if (getRealComp) val = matRe * opRe - matIm * opIm; else val = matRe * opIm + matIm * opRe; } // array of each thread's collected sum term, to be summed extern __shared__ qreal tempReductionArray[]; tempReductionArray[threadIdx.x] = val; __syncthreads(); // every second thread reduces if (threadIdx.x<blockDim.x/2) reduceBlock(tempReductionArray, reducedArray, blockDim.x); } Complex densmatr_calcExpecDiagonalOp(Qureg qureg, DiagonalOp op) { /* @TODO: remove all this reduction boilerplate from QuEST GPU * (e.g. a func which accepts a pointer to do every-value reduction?) 
*/ qreal expecReal, expecImag; int getRealComp; long long int numValuesToReduce; int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; int maxReducedPerLevel; int firstTime; // compute real component of inner product getRealComp = 1; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecReal, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // compute imag component of inner product getRealComp = 0; numValuesToReduce = qureg.numAmpsPerChunk; maxReducedPerLevel = REDUCE_SHARED_SIZE; firstTime = 1; while (numValuesToReduce > 1) { if (numValuesToReduce < maxReducedPerLevel) { valuesPerCUDABlock = numValuesToReduce; numCUDABlocks = 1; } else { valuesPerCUDABlock = maxReducedPerLevel; numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); } sharedMemSize = valuesPerCUDABlock*sizeof(qreal); if (firstTime) { densmatr_calcExpecDiagonalOpKernel<<<numCUDABlocks, valuesPerCUDABlock, sharedMemSize>>>( getRealComp, qureg.stateVec.real, qureg.stateVec.imag, op.deviceOperator.real, op.deviceOperator.imag, op.numQubits, numValuesToReduce, qureg.firstLevelReduction); firstTime = 0; } else { cudaDeviceSynchronize(); copySharedReduceBlock<<<numCUDABlocks, valuesPerCUDABlock/2, sharedMemSize>>>( qureg.firstLevelReduction, qureg.secondLevelReduction, valuesPerCUDABlock); cudaDeviceSynchronize(); swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); } numValuesToReduce = numValuesToReduce/maxReducedPerLevel; } cudaMemcpy(&expecImag, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); // return complex Complex expecVal; expecVal.real = expecReal; expecVal.imag = expecImag; return expecVal; } void agnostic_setDiagonalOpElems(DiagonalOp op, long long int startInd, qreal* real, qreal* imag, long long int numElems) { // update both RAM and VRAM, for consistency memcpy(&op.real[startInd], real, numElems * sizeof(qreal)); memcpy(&op.imag[startInd], imag, numElems * sizeof(qreal)); cudaDeviceSynchronize(); cudaMemcpy( op.deviceOperator.real + startInd, real, numElems * sizeof(*(op.deviceOperator.real)), cudaMemcpyHostToDevice); cudaMemcpy( op.deviceOperator.imag + startInd, imag, numElems * sizeof(*(op.deviceOperator.imag)), cudaMemcpyHostToDevice); } void seedQuESTDefault(){ // init MT random number generator with three keys -- time and pid // for the MPI version, it is ok that all procs will get the same seed as random numbers will only be // used by the master process unsigned long int key[2]; getQuESTDefaultSeedKey(key); init_by_array(key, 2); } #ifdef __cplusplus } 
#endif
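/* Editor's note -- illustrative sketch, not part of the QuEST source above.
 * The calcInnerProduct / calcFidelity / calcPurity / calcExpecDiagonalOp hosts
 * all repeat the same reduction boilerplate: a kernel folds values into
 * per-block partial sums (firstLevelReduction), then the partial sums are
 * reduced again, ping-ponging between two device buffers, until a single value
 * remains. A minimal standalone version of that pattern, with hypothetical
 * names (sumKernel, gpuSum), might look like this: */
#include <cuda_runtime.h>

__global__ void sumKernel(const double* in, double* out, long long int n)
{
    extern __shared__ double tmp[];
    long long int i = blockIdx.x * (long long int) blockDim.x + threadIdx.x;
    tmp[threadIdx.x] = (i < n) ? in[i] : 0.0;   // pad the last block with zeros
    __syncthreads();

    // in-block tree reduction (blockDim.x must be a power of two)
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            tmp[threadIdx.x] += tmp[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = tmp[0];               // one partial sum per block
}

// Reduce n values in d_in to a single sum. d_scratch must hold at least
// ceil(n/128) elements; both device buffers are overwritten in the process.
double gpuSum(double* d_in, double* d_scratch, long long int n)
{
    const int block = 128;
    while (n > 1) {
        int grid = (int) ((n + block - 1) / block);
        sumKernel<<<grid, block, block * sizeof(double)>>>(d_in, d_scratch, n);
        cudaDeviceSynchronize();
        double* t = d_in; d_in = d_scratch; d_scratch = t;  // ping-pong buffers
        n = grid;
    }
    double result;
    cudaMemcpy(&result, d_in, sizeof(double), cudaMemcpyDeviceToHost);
    return result;
}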
048a822d13755a4e110690e29332ad720fb2f146.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" //poison log likelihood for one observation __device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh) { KC_FP_TYPE r = KC_MAX(KC_MINN,KC_MIN(KC_MAXN,log1p(KC_EXP(g*x))*KC_EXP(sh))); return y*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y+1.0); } //sums up log likelihood of each trial given model parameters __global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < 1) { log_p[0] = 0; for(int ii = 0; ii < NT; ii++) { log_p[0] += log_p_tr[ii]; } } } //averages log likelihood of each simulated path // (one thread for each trial) __global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { log_p_tr[idx] = 0; KC_FP_TYPE trSum = 0; KC_FP_TYPE log_x = 0; log_p_tr[idx] = KC_SQRT(-1.0); //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) { trSum = 1 ; log_x = log_p[ii*NT+idx]; for(int kk = 0; kk < ii; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } for(int kk = ii+1; kk < nSims; kk++) { trSum += KC_EXP(log_p[kk*NT+idx] - log_x); } if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) { log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum); break; } } } } //simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood __global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT ) { int trNum = idx; int T1 = trIdx[trNum]; //xx contains zero mean Gaussian noise of variance \omega^2 xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial int currIdx = sim*(NT)+idx; log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1]); for(int ii = T1+1; ii < trIdx[trNum+1];ii++) { //progates particle forward in time xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0); //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii] log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii]); } } } //Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters // This estimation is made by Monte Carlo simulations from the model to integrate out latent variable //args // 0 = y (observations) // 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 3 = spike history effect (same size as y) // 4 = beta values // 5 = w (variance of diffusion process) // 6 = l_0 (starting lambda value) // 7 = g (absorbing boundary effective height) // 8 = dt (bin size in seconds) // 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000) //outputs (left-hand side) // 0 = log p(y|\theta) // 1 = log p(y|\theta) for each individual trial void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; //load up trial data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); int * trIdx = kcGetArrayDataInt(prhs[1]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * betaIdx = kcGetArrayDataInt(prhs[2],TT); // load spike history effect KC_FP_TYPE * spe = kcGetArrayData(prhs[3]); //how many simulations to use to estimate log p(y|\theta) int trialsToSim = (int)mxGetScalar(prhs[9]); //load up parameters to simulate model if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) { mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!"); } KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]); int numBetas = mxGetNumberOfElements(prhs[4]); KC_FP_TYPE * b_gpu; ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != hipSuccess) { mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } checkCudaErrors(hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice)); KC_FP_TYPE w = mxGetScalar(prhs[5]); KC_FP_TYPE l_0 = mxGetScalar(prhs[6]); KC_FP_TYPE g = mxGetScalar(prhs[7]); KC_FP_TYPE dt = mxGetScalar(prhs[8]); //setup CUDA variables + random number generator int randSize = TT + (((TT)%2==0)?0:1); KC_FP_TYPE * xx; checkCudaErrors(hipMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE))); hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); } int blockSize = 2; int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1); int blockSizeT = 2; int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1); //allocates sspace on GPU for simulating the likelihood KC_FP_TYPE * log_p; //KC_FP_TYPE * log_p_2; KC_FP_TYPE * log_p_tr; KC_FP_TYPE * sum_log_p; checkCudaErrors(hipMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim)); //checkCudaErrors(hipMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim)); checkCudaErrors(hipMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT)); checkCudaErrors(hipMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1)); // generate AR1 noise for(int kk = 0; kk < trialsToSim; kk++) { //generates zero mean Gaussian noise with correct variance hiprandStatus_t = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w)); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND gen error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors"); 
} //checkCudaErrors(hipDeviceSynchronize()); //calculate path + logP hipLaunchKernelGGL(( kcSimGBPaths), dim3(nBlocks),dim3(blockSize), 0, 0, y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error in simulating of kcSimGaussianBound.cu "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA errors"); } } // log_p_2 = log_p; //average likelihood of each sampled path to get log p(y|\theta) for each trial hipLaunchKernelGGL(( kcSumGBlogpTr), dim3(nBlocksT),dim3(blockSizeT), 0, 0, log_p,log_p_tr,NT,trialsToSim); checkCudaErrors(hipDeviceSynchronize()); //sums up log likelihood of each trial hipLaunchKernelGGL(( kcSumGBfinal), dim3(1),dim3(1), 0, 0, log_p_tr,sum_log_p,NT); checkCudaErrors(hipDeviceSynchronize()); //copy back to host if(nlhs > 0) { plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost)); } if(nlhs > 1) { plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),hipMemcpyDeviceToHost)); } //free up CUDA variables checkCudaErrors(hiprandDestroyGenerator(curandGen)); checkCudaErrors(hipFree(xx)); checkCudaErrors(hipFree(b_gpu)); checkCudaErrors(hipFree(log_p)); checkCudaErrors(hipFree(log_p_tr)); checkCudaErrors(hipFree(sum_log_p)); }
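/* Editor's note -- illustrative sketch, not part of the MEX source above.
 * kcSumGBlogpTr averages the per-path likelihoods of each trial in log space,
 * i.e. it computes log( (1/nSims) * sum_i exp(log_p_i) ) without exponentiating
 * large-magnitude log-likelihoods directly; the kernel tries successive
 * reference terms until the sum comes out finite. The standard equivalent
 * choice is to factor out the largest term, as in this hypothetical host-side
 * helper: */
#include <math.h>

// numerically stable log-mean-exp of n log-likelihoods (n >= 1)
double logMeanExp(const double* logp, int n)
{
    double ref = logp[0];                 // use the maximum term as the reference
    for (int i = 1; i < n; i++)
        if (logp[i] > ref) ref = logp[i];

    double s = 0.0;                       // every exponent below is now <= 0
    for (int i = 0; i < n; i++)
        s += exp(logp[i] - ref);

    return ref - log((double) n) + log(s);
}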
048a822d13755a4e110690e29332ad720fb2f146.cu
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>

#include <helper_functions.h>
#include <helper_cuda.h>

#include "mex.h"

#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"

//poison log likelihood for one observation
__device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh) {
    KC_FP_TYPE r = KC_MAX(KC_MINN,KC_MIN(KC_MAXN,log1p(KC_EXP(g*x))*KC_EXP(sh)));
    return y*(KC_LOG(r)+KC_LOG(dt)) - dt*r - KC_GAMMALN(y+1.0);
}

//sums up log likelihood of each trial given model parameters
__global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx < 1) {
        log_p[0] = 0;
        for(int ii = 0; ii < NT; ii++) {
            log_p[0] += log_p_tr[ii];
        }
    }
}

//averages log likelihood of each simulated path
// (one thread for each trial)
__global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < NT) {
        log_p_tr[idx] = 0;
        KC_FP_TYPE trSum = 0;
        KC_FP_TYPE log_x = 0;
        log_p_tr[idx] = KC_SQRT(-1.0);

        //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial
        // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods
        for(int ii = 0; ii < nSims && isnan(log_p_tr[idx]);ii++) {
            trSum = 1;
            log_x = log_p[ii*NT+idx];
            for(int kk = 0; kk < ii; kk++) {
                trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
            }
            for(int kk = ii+1; kk < nSims; kk++) {
                trSum += KC_EXP(log_p[kk*NT+idx] - log_x);
            }
            if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) {
                log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum);
                break;
            }
        }
    }
}

//simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood
__global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < NT ) {
        int trNum = idx;
        int T1 = trIdx[trNum];
        //xx contains zero mean Gaussian noise of variance \omega^2

        xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial

        int currIdx = sim*(NT)+idx;
        log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1]);
        for(int ii = T1+1; ii < trIdx[trNum+1];ii++) {
            //progates particle forward in time
            xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0);
            //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii]
            log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii]);
        }
    }
}

//Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters
// This estimation is made by Monte Carlo simulations from the model to integrate out latent variable
//args
// 0 = y (observations)
// 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y)
// 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 3 = spike history effect (same size as y)
// 4 = beta values
// 5 = w (variance of diffusion process)
// 6 = l_0 (starting lambda value)
// 7 = g (absorbing boundary effective height)
// 8 = dt (bin size in seconds)
// 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000)
//outputs (left-hand side)
// 0 = log p(y|\theta)
// 1 = log p(y|\theta) for each individual trial
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    cudaError_t ce;

    //load up trial data
    unsigned int TT = kcGetArrayNumEl(prhs[0]);
    KC_FP_TYPE * y = kcGetArrayData(prhs[0]);
    int * trIdx = kcGetArrayDataInt(prhs[1]);
    unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
    int * betaIdx = kcGetArrayDataInt(prhs[2],TT);

    // load spike history effect
    KC_FP_TYPE * spe = kcGetArrayData(prhs[3]);

    //how many simulations to use to estimate log p(y|\theta)
    int trialsToSim = (int)mxGetScalar(prhs[9]);

    //load up parameters to simulate model
    if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) {
        mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!");
    }
    KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]);
    int numBetas = mxGetNumberOfElements(prhs[4]);
    KC_FP_TYPE * b_gpu;

    ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
    if(ce != cudaSuccess) {
        mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) ");
        mexPrintf(cudaGetErrorString(ce));
        mexPrintf(" (%d)\n", (int)ce);
    }
    checkCudaErrors(cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice));

    KC_FP_TYPE w = mxGetScalar(prhs[5]);
    KC_FP_TYPE l_0 = mxGetScalar(prhs[6]);
    KC_FP_TYPE g = mxGetScalar(prhs[7]);
    KC_FP_TYPE dt = mxGetScalar(prhs[8]);

    //setup CUDA variables + random number generator
    int randSize = TT + (((TT)%2==0)?0:1);
    KC_FP_TYPE * xx;
    checkCudaErrors(cudaMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE)));
    curandGenerator_t curandGen = 0;
    curandStatus_t curandStatus;
    curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT);
    if(curandStatus != CURAND_STATUS_SUCCESS ) {
        mexPrintf("CURAND-1 error %d\n",(int)curandStatus);
        mexErrMsgTxt("CUDA errors");
    }

    struct timeval now;
    gettimeofday(&now,NULL);
    unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
    curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
    if(curandStatus != CURAND_STATUS_SUCCESS ) {
        mexPrintf("CURAND-2 error %d\n",(int)curandStatus);
        mexErrMsgTxt("CUDA errors");
    }

    int blockSize = 2;
    int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1);

    int blockSizeT = 2;
    int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1);

    //allocates sspace on GPU for simulating the likelihood
    KC_FP_TYPE * log_p;
    //KC_FP_TYPE * log_p_2;
    KC_FP_TYPE * log_p_tr;
    KC_FP_TYPE * sum_log_p;
    checkCudaErrors(cudaMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim));
    //checkCudaErrors(cudaMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim));
    checkCudaErrors(cudaMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT));
    checkCudaErrors(cudaMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1));

    // generate AR1 noise
    for(int kk = 0; kk < trialsToSim; kk++) {
        //generates zero mean Gaussian noise with correct variance
        curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w));
        if(curandStatus != CURAND_STATUS_SUCCESS ) {
            mexPrintf("CURAND gen error %d\n",(int)curandStatus);
            mexErrMsgTxt("CUDA errors");
        }
        //checkCudaErrors(cudaDeviceSynchronize());

        //calculate path + logP
        kcSimGBPaths<<<nBlocks,blockSize>>>(y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe);
        ce = cudaDeviceSynchronize();
        if(ce != cudaSuccess) {
            mexPrintf("Error in simulation of kcSimGaussianBound.cu ");
            mexPrintf(cudaGetErrorString(ce));
            mexPrintf(" (%d)\n", (int)ce);
            mexErrMsgTxt("CUDA errors");
        }
    }

    // log_p_2 = log_p;

    //average likelihood of each sampled path to get log p(y|\theta) for each trial
    kcSumGBlogpTr<<<nBlocksT,blockSizeT>>>(log_p,log_p_tr,NT,trialsToSim);
    checkCudaErrors(cudaDeviceSynchronize());

    //sums up log likelihood of each trial
    kcSumGBfinal<<<1,1>>>(log_p_tr,sum_log_p,NT);
    checkCudaErrors(cudaDeviceSynchronize());

    //copy back to host
    if(nlhs > 0) {
        plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
        checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost));
    }
    if(nlhs > 1) {
        plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
        checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost));
    }

    //free up CUDA variables
    checkCudaErrors(curandDestroyGenerator(curandGen));
    checkCudaErrors(cudaFree(xx));
    checkCudaErrors(cudaFree(b_gpu));
    checkCudaErrors(cudaFree(log_p));
    checkCudaErrors(cudaFree(log_p_tr));
    checkCudaErrors(cudaFree(sum_log_p));
}
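The pair above captures the two systematic rewrites hipify applies to this file: CUDA runtime and cuRAND calls become their hip/hiprand counterparts, and triple-chevron kernel launches become hipLaunchKernelGGL calls. A minimal sketch of the launch rewrite on a toy kernel (the kernel and function names here are illustrative, not taken from either file):

__global__ void scaleKernel(float* data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= 2.0f;
}

void launchBothWays(float* d_data, int n)
{
    int blockSize = 256;
    int nBlocks   = (n + blockSize - 1) / blockSize;

    // CUDA source form, as in the .cu version above:
    scaleKernel<<<nBlocks, blockSize>>>(d_data, n);

    // hipify rewrites the same launch into the HIP form seen in the .hip version,
    // where the two extra arguments are dynamic shared-memory bytes and the stream:
    //   hipLaunchKernelGGL((scaleKernel), dim3(nBlocks), dim3(blockSize), 0, 0, d_data, n);
}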
c5e975f032b2d4852487ea7cf1ca7cee6beed1b6.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #if defined(__HIPCC__) #include <thrust/experimental/arch.h> using namespace thrust::experimental::arch; void set_compute_capability(hipDeviceProp_t& properties, int major, int minor) { properties.major = major; properties.minor = minor; } void set_G80(hipDeviceProp_t& properties) { set_compute_capability(properties, 1, 0); properties.multiProcessorCount = 16; properties.sharedMemPerBlock = 16384; properties.regsPerBlock = 8192; properties.warpSize = 32; properties.maxThreadsPerBlock = 512; } void set_G84(hipDeviceProp_t& properties) { set_compute_capability(properties, 1, 1); properties.multiProcessorCount = 4; properties.sharedMemPerBlock = 16384; properties.regsPerBlock = 8192; properties.warpSize = 32; properties.maxThreadsPerBlock = 512; } void set_GT200(hipDeviceProp_t& properties) { set_compute_capability(properties, 1, 3); properties.multiProcessorCount = 30; properties.sharedMemPerBlock = 16384; properties.regsPerBlock = 16384; properties.warpSize = 32; properties.maxThreadsPerBlock = 512; } void set_func_attributes(hipFuncAttributes& attributes, size_t constSizeBytes, // Size of constant memory in bytes. size_t localSizeBytes, // Size of local memory in bytes. int maxThreadsPerBlock, // Maximum number of threads per block. int numRegs, // Number of registers used. size_t sharedSizeBytes) // Size of shared memory in bytes. { attributes.constSizeBytes = constSizeBytes; attributes.localSizeBytes = localSizeBytes; attributes.maxThreadsPerBlock = maxThreadsPerBlock; attributes.numRegs = numRegs; attributes.sharedSizeBytes = sharedSizeBytes; } void TestMaxActiveThreads(void) { hipDeviceProp_t properties; set_compute_capability(properties, 1, 0); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 768); set_compute_capability(properties, 1, 1); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 768); set_compute_capability(properties, 1, 2); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 1024); set_compute_capability(properties, 1, 3); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 1024); } DECLARE_UNITTEST(TestMaxActiveThreads); void TestMaxActiveBlocks(void) { hipDeviceProp_t properties; hipFuncAttributes attributes; // Kernel #1 : Full Occupancy on all devices set_func_attributes(attributes, 0, 0, 512, 10, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 3); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 3); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 4); // Kernel #2 : 2/3rds Occupancy on G8x and 100% on GT200 set_func_attributes(attributes, 0, 0, 512, 16, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 4); // Kernel #3 : 1/3rds Occupancy on G8x and 75% on GT200 set_func_attributes(attributes, 0, 0, 512, 20, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_GT200(properties); 
ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 3); // Kernel #4 : 1/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 512, 21, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); // Kernel #5 : 2/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 512, 10, 8192); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); } DECLARE_UNITTEST(TestMaxActiveBlocks); void TestMaxBlocksizeWithHighestOccupancy(void) { hipDeviceProp_t properties; hipFuncAttributes attributes; // Kernel #1 : Full Occupancy on all devices set_func_attributes(attributes, 0, 0, 512, 10, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 384); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); // Kernel #2 : 2/3rds Occupancy on G8x and 100% on GT200 set_func_attributes(attributes, 0, 0, 512, 16, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); // Kernel #3 : 50% Occupancy on G8x and 75% on GT200 set_func_attributes(attributes, 0, 0, 256, 20, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 192); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 256); // Kernel #4 : 1/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 384, 26, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 256); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 192); // Kernel #5 :100% Occupancy on G8x and GT200 set_func_attributes(attributes, 0, 0, 512, 10, 8192); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 384); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); } DECLARE_UNITTEST(TestMaxBlocksizeWithHighestOccupancy); void TestMaxBlocksize(void) { hipDeviceProp_t properties; hipFuncAttributes attributes; // Kernel #1 : Full Occupancy on all devices set_func_attributes(attributes, 0, 0, 512, 10, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); // Kernel #2 : 2/3rds Occupancy on G8x and 100% on GT200 set_func_attributes(attributes, 0, 0, 512, 16, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); // Kernel #3 : 50% Occupancy on G8x and 75% on GT200 set_func_attributes(attributes, 0, 0, 512, 20, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 384); set_GT200(properties); 
ASSERT_EQUAL(max_blocksize(properties, attributes), 512); // Kernel #4 : 1/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 384, 26, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 256); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 384); // Kernel #5 :100% Occupancy on G8x and GT200 set_func_attributes(attributes, 0, 0, 512, 10, 8192); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); } DECLARE_UNITTEST(TestMaxBlocksize); #endif // defined(__HIPCC__)
c5e975f032b2d4852487ea7cf1ca7cee6beed1b6.cu
#include <unittest/unittest.h> #if defined(__CUDACC__) #include <thrust/experimental/arch.h> using namespace thrust::experimental::arch; void set_compute_capability(cudaDeviceProp& properties, int major, int minor) { properties.major = major; properties.minor = minor; } void set_G80(cudaDeviceProp& properties) { set_compute_capability(properties, 1, 0); properties.multiProcessorCount = 16; properties.sharedMemPerBlock = 16384; properties.regsPerBlock = 8192; properties.warpSize = 32; properties.maxThreadsPerBlock = 512; } void set_G84(cudaDeviceProp& properties) { set_compute_capability(properties, 1, 1); properties.multiProcessorCount = 4; properties.sharedMemPerBlock = 16384; properties.regsPerBlock = 8192; properties.warpSize = 32; properties.maxThreadsPerBlock = 512; } void set_GT200(cudaDeviceProp& properties) { set_compute_capability(properties, 1, 3); properties.multiProcessorCount = 30; properties.sharedMemPerBlock = 16384; properties.regsPerBlock = 16384; properties.warpSize = 32; properties.maxThreadsPerBlock = 512; } void set_func_attributes(cudaFuncAttributes& attributes, size_t constSizeBytes, // Size of constant memory in bytes. size_t localSizeBytes, // Size of local memory in bytes. int maxThreadsPerBlock, // Maximum number of threads per block. int numRegs, // Number of registers used. size_t sharedSizeBytes) // Size of shared memory in bytes. { attributes.constSizeBytes = constSizeBytes; attributes.localSizeBytes = localSizeBytes; attributes.maxThreadsPerBlock = maxThreadsPerBlock; attributes.numRegs = numRegs; attributes.sharedSizeBytes = sharedSizeBytes; } void TestMaxActiveThreads(void) { cudaDeviceProp properties; set_compute_capability(properties, 1, 0); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 768); set_compute_capability(properties, 1, 1); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 768); set_compute_capability(properties, 1, 2); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 1024); set_compute_capability(properties, 1, 3); ASSERT_EQUAL(max_active_threads_per_multiprocessor(properties), 1024); } DECLARE_UNITTEST(TestMaxActiveThreads); void TestMaxActiveBlocks(void) { cudaDeviceProp properties; cudaFuncAttributes attributes; // Kernel #1 : Full Occupancy on all devices set_func_attributes(attributes, 0, 0, 512, 10, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 3); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 3); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 4); // Kernel #2 : 2/3rds Occupancy on G8x and 100% on GT200 set_func_attributes(attributes, 0, 0, 512, 16, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 4); // Kernel #3 : 1/3rds Occupancy on G8x and 75% on GT200 set_func_attributes(attributes, 0, 0, 512, 20, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 3); // Kernel 
#4 : 1/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 512, 21, 2048); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 1); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); // Kernel #5 : 2/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 512, 10, 8192); set_G80(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_G84(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); set_GT200(properties); ASSERT_EQUAL(max_active_blocks_per_multiprocessor(properties, attributes, 256, 0), 2); } DECLARE_UNITTEST(TestMaxActiveBlocks); void TestMaxBlocksizeWithHighestOccupancy(void) { cudaDeviceProp properties; cudaFuncAttributes attributes; // Kernel #1 : Full Occupancy on all devices set_func_attributes(attributes, 0, 0, 512, 10, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 384); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); // Kernel #2 : 2/3rds Occupancy on G8x and 100% on GT200 set_func_attributes(attributes, 0, 0, 512, 16, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); // Kernel #3 : 50% Occupancy on G8x and 75% on GT200 set_func_attributes(attributes, 0, 0, 256, 20, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 192); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 256); // Kernel #4 : 1/3rds Occupancy on G8x and 50% on GT200 set_func_attributes(attributes, 0, 0, 384, 26, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 256); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 192); // Kernel #5 :100% Occupancy on G8x and GT200 set_func_attributes(attributes, 0, 0, 512, 10, 8192); set_G80(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 384); set_GT200(properties); ASSERT_EQUAL(max_blocksize_with_highest_occupancy(properties, attributes), 512); } DECLARE_UNITTEST(TestMaxBlocksizeWithHighestOccupancy); void TestMaxBlocksize(void) { cudaDeviceProp properties; cudaFuncAttributes attributes; // Kernel #1 : Full Occupancy on all devices set_func_attributes(attributes, 0, 0, 512, 10, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); // Kernel #2 : 2/3rds Occupancy on G8x and 100% on GT200 set_func_attributes(attributes, 0, 0, 512, 16, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); // Kernel #3 : 50% Occupancy on G8x and 75% on GT200 set_func_attributes(attributes, 0, 0, 512, 20, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 384); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); // Kernel #4 : 1/3rds Occupancy on G8x and 
50% on GT200 set_func_attributes(attributes, 0, 0, 384, 26, 2048); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 256); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 384); // Kernel #5 :100% Occupancy on G8x and GT200 set_func_attributes(attributes, 0, 0, 512, 10, 8192); set_G80(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); set_GT200(properties); ASSERT_EQUAL(max_blocksize(properties, attributes), 512); } DECLARE_UNITTEST(TestMaxBlocksize); #endif // defined(__CUDACC__)
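Beyond the __CUDACC__/__HIPCC__ guard and the cudaDeviceProp/cudaFuncAttributes type renames, the values asserted in the pair above follow from a simple per-multiprocessor resource calculation. A minimal sketch of that arithmetic (my own helper for illustration, not thrust's implementation, which may differ in allocation granularity):

#include <algorithm>

// Active blocks per multiprocessor are bounded by the thread, register, and shared-memory budgets.
int estimate_max_active_blocks(int maxThreadsPerSM, int regsPerSM, int smemPerSM,
                               int blockSize, int regsPerThread, int smemPerBlock)
{
    int byThreads = maxThreadsPerSM / blockSize;
    int byRegs    = regsPerSM / (regsPerThread * blockSize);
    int bySmem    = smemPerSM / smemPerBlock;
    return std::min(byThreads, std::min(byRegs, bySmem));
}

// "Kernel #1" on G80 (768 threads/SM, 8192 registers/SM, 16384 B shared memory/SM) with a
// 256-thread block, 10 registers per thread, and 2048 B of shared memory:
//   min(768/256, 8192/(10*256), 16384/2048) = min(3, 3, 8) = 3, matching the first assert;
// on GT200 the thread limit (1024/256 = 4) becomes the binding constraint, giving the asserted 4.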
677e053afc8b00384a2487f1b69d1a5f88d50c10.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void scale_random(float *random, size_t total_size){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;

    if(index < total_size){
        random[index] = random[index] * 2.0 - 1.0;
        __syncthreads();
    }
}
677e053afc8b00384a2487f1b69d1a5f88d50c10.cu
#include "includes.h" __global__ void scale_random(float *random, size_t total_size){ size_t index = blockIdx.x * blockDim.x + threadIdx.x; if(index < total_size){ random[index] = random[index] * 2.0 - 1.0; __syncthreads(); } }
420612870899fbcaa13997b35a85a2feb03cfa1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # ifndef OCTAVES # define OCTAVES 8 # endif # define clamp(a, low, high) (((a) >= (low) && (low) <= (high)) ? (a) : ((a) < (low) ? (low) : (high))) __device__ int grad[12][3] = { {1, 1, 0}, {-1, 1, 0}, {1, -1, 0}, {-1, -1, 0}, {1, 0, 1}, {-1, 0, 1}, {1, 0, -1}, {-1, 0, -1}, {0, 1, 1}, {0, -1, 1}, {0, 1, -1}, {0, -1, -1} }; __device__ inline Real_t dot(int gradIdx, Real_t x, Real_t y, Real_t z) { return grad[gradIdx][0] * x + grad[gradIdx][1] * y + grad[gradIdx][2] * z; } __device__ inline Real_t fade(Real_t t) { return t * t * t * (t * (t * 6.0 - 15.0) + 10.0); } __device__ inline Real_t lerp(Real_t x, Real_t y, Real_t t) { return (1.0 - t) * x + t * y; } __device__ inline Real_t signedNoise(int * permutations, Real_t x, Real_t y, Real_t z) { int ix = static_cast<int>(x); int iy = static_cast<int>(y); int iz = static_cast<int>(z); x -= ix; y -= iy; z -= iz; ix &= 255; iy &= 255; iz &= 255; int g000 = permutations[ix + permutations[iy + permutations[iz]]] % 12; int g001 = permutations[ix + permutations[iy + permutations[iz + 1]]] % 12; int g010 = permutations[ix + permutations[iy + 1 + permutations[iz]]] % 12; int g011 = permutations[ix + permutations[iy + 1 + permutations[iz + 1]]] % 12; int g100 = permutations[ix + 1 + permutations[iy + permutations[iz]]] % 12; int g101 = permutations[ix + 1 + permutations[iy + permutations[iz + 1]]] % 12; int g110 = permutations[ix + 1 + permutations[iy + 1 + permutations[iz]]] % 12; int g111 = permutations[ix + 1 + permutations[iy + 1 + permutations[iz + 1]]] % 12; Real_t n000 = dot(g000, x, y, z); Real_t n100 = dot(g100, x-1, y, z); Real_t n010 = dot(g010, x, y-1, z); Real_t n110 = dot(g110, x-1, y-1, z); Real_t n001 = dot(g001, x, y, z-1); Real_t n101 = dot(g101, x-1, y, z-1); Real_t n011 = dot(g011, x, y-1, z-1); Real_t n111 = dot(g111, x-1, y-1, z-1); Real_t u = fade(x); Real_t v = fade(y); Real_t w = fade(z); Real_t nx00 = lerp(n000, n100, u); Real_t nx01 = lerp(n001, n101, u); Real_t nx10 = lerp(n010, n110, u); Real_t nx11 = lerp(n011, n111, u); Real_t nxy0 = lerp(nx00, nx10, v); Real_t nxy1 = lerp(nx01, nx11, v); Real_t nxyz = lerp(nxy0, nxy1, w); return nxyz; } extern "C" __global__ void monoFractal(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t tmp = 0.0; for (int ii = 0; ii < OCTAVES; ii++) { tmp = signedNoise(permutations, x * freq, y * freq, z * freq); tmp *= pow(lacunarity, -((Real_t) ii) * increment); noiseVal += tmp; freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void multiFractal(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) 
return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 1.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t tmp = 0.0; increment *= 0.01; for (int ii = 0; ii < OCTAVES; ii++) { tmp = signedNoise(permutations, x * freq, y * freq, z * freq) + 1.0; tmp *= pow(lacunarity, -((Real_t) ii) * increment); noiseVal *= tmp; freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void turbulence(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t tmp = 0.0; for (int ii = 0; ii < OCTAVES; ii++) { tmp = signedNoise(permutations, x * freq, y * freq, z * freq); tmp *= pow(lacunarity, -((Real_t) ii) * increment); noiseVal += abs(tmp); freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void ridgeMultifractal(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t offset = 1.0, threshold = 0.5, a = 1.0, tmp = 0.0; for (int ii = 0; ii <= OCTAVES; ii++) { tmp = abs(signedNoise(permutations, x * freq, y * freq, z * freq)); tmp = offset - tmp; tmp *= tmp * a; noiseVal += tmp * pow(lacunarity, -((Real_t) ii) * increment); a = clamp(tmp * threshold, 0.0, 1.0); freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void classicPerlin(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t amp = amplitude; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; for (int ii = 0; ii < OCTAVES; ii++) { noiseVal += signedNoise(permutations, x * freq, y * freq, z * freq) * amp; freq *= lacunarity; amp *= gain; } vals[index] = clamp(noiseVal, 0.0, 1.0); }
420612870899fbcaa13997b35a85a2feb03cfa1c.cu
# ifndef OCTAVES # define OCTAVES 8 # endif # define clamp(a, low, high) (((a) >= (low) && (low) <= (high)) ? (a) : ((a) < (low) ? (low) : (high))) __device__ int grad[12][3] = { {1, 1, 0}, {-1, 1, 0}, {1, -1, 0}, {-1, -1, 0}, {1, 0, 1}, {-1, 0, 1}, {1, 0, -1}, {-1, 0, -1}, {0, 1, 1}, {0, -1, 1}, {0, 1, -1}, {0, -1, -1} }; __device__ inline Real_t dot(int gradIdx, Real_t x, Real_t y, Real_t z) { return grad[gradIdx][0] * x + grad[gradIdx][1] * y + grad[gradIdx][2] * z; } __device__ inline Real_t fade(Real_t t) { return t * t * t * (t * (t * 6.0 - 15.0) + 10.0); } __device__ inline Real_t lerp(Real_t x, Real_t y, Real_t t) { return (1.0 - t) * x + t * y; } __device__ inline Real_t signedNoise(int * permutations, Real_t x, Real_t y, Real_t z) { int ix = static_cast<int>(x); int iy = static_cast<int>(y); int iz = static_cast<int>(z); x -= ix; y -= iy; z -= iz; ix &= 255; iy &= 255; iz &= 255; int g000 = permutations[ix + permutations[iy + permutations[iz]]] % 12; int g001 = permutations[ix + permutations[iy + permutations[iz + 1]]] % 12; int g010 = permutations[ix + permutations[iy + 1 + permutations[iz]]] % 12; int g011 = permutations[ix + permutations[iy + 1 + permutations[iz + 1]]] % 12; int g100 = permutations[ix + 1 + permutations[iy + permutations[iz]]] % 12; int g101 = permutations[ix + 1 + permutations[iy + permutations[iz + 1]]] % 12; int g110 = permutations[ix + 1 + permutations[iy + 1 + permutations[iz]]] % 12; int g111 = permutations[ix + 1 + permutations[iy + 1 + permutations[iz + 1]]] % 12; Real_t n000 = dot(g000, x, y, z); Real_t n100 = dot(g100, x-1, y, z); Real_t n010 = dot(g010, x, y-1, z); Real_t n110 = dot(g110, x-1, y-1, z); Real_t n001 = dot(g001, x, y, z-1); Real_t n101 = dot(g101, x-1, y, z-1); Real_t n011 = dot(g011, x, y-1, z-1); Real_t n111 = dot(g111, x-1, y-1, z-1); Real_t u = fade(x); Real_t v = fade(y); Real_t w = fade(z); Real_t nx00 = lerp(n000, n100, u); Real_t nx01 = lerp(n001, n101, u); Real_t nx10 = lerp(n010, n110, u); Real_t nx11 = lerp(n011, n111, u); Real_t nxy0 = lerp(nx00, nx10, v); Real_t nxy1 = lerp(nx01, nx11, v); Real_t nxyz = lerp(nxy0, nxy1, w); return nxyz; } extern "C" __global__ void monoFractal(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t tmp = 0.0; for (int ii = 0; ii < OCTAVES; ii++) { tmp = signedNoise(permutations, x * freq, y * freq, z * freq); tmp *= pow(lacunarity, -((Real_t) ii) * increment); noiseVal += tmp; freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void multiFractal(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 1.0f; Real_t 
freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t tmp = 0.0; increment *= 0.01; for (int ii = 0; ii < OCTAVES; ii++) { tmp = signedNoise(permutations, x * freq, y * freq, z * freq) + 1.0; tmp *= pow(lacunarity, -((Real_t) ii) * increment); noiseVal *= tmp; freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void turbulence(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t tmp = 0.0; for (int ii = 0; ii < OCTAVES; ii++) { tmp = signedNoise(permutations, x * freq, y * freq, z * freq); tmp *= pow(lacunarity, -((Real_t) ii) * increment); noiseVal += abs(tmp); freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void ridgeMultifractal(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; Real_t offset = 1.0, threshold = 0.5, a = 1.0, tmp = 0.0; for (int ii = 0; ii <= OCTAVES; ii++) { tmp = abs(signedNoise(permutations, x * freq, y * freq, z * freq)); tmp = offset - tmp; tmp *= tmp * a; noiseVal += tmp * pow(lacunarity, -((Real_t) ii) * increment); a = clamp(tmp * threshold, 0.0, 1.0); freq *= lacunarity; } vals[index] = clamp(noiseVal, 0.0, 1.0); } extern "C" __global__ void classicPerlin(Real_t * vals, int * permutations, Real_t amplitude, Real_t frequency, Real_t gain, Real_t lacunarity, Real_t scale, Real_t increment, int width, int height, int depth) { const int xIndex = threadIdx.x + blockIdx.x * blockDim.x; const int yIndex = threadIdx.y + blockIdx.y * blockDim.y; const int zIndex = threadIdx.z + blockIdx.z * blockDim.z; if (xIndex >= width || yIndex >= height) return ; const int index = xIndex + width * yIndex ; Real_t noiseVal = 0.0f; Real_t freq = frequency; Real_t amp = amplitude; Real_t x = xIndex * frequency / scale; Real_t y = yIndex * frequency / scale; Real_t z = zIndex * frequency / scale; for (int ii = 0; ii < OCTAVES; ii++) { noiseVal += signedNoise(permutations, x * freq, y * freq, z * freq) * amp; freq *= lacunarity; amp *= gain; } vals[index] = clamp(noiseVal, 0.0, 1.0); }
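The noise kernels above index permutations[i + permutations[j + permutations[k + 1] ...]] with i, j, k up to 255, so they expect the classic Perlin setup: a 256-entry permutation repeated twice (512 ints). Note also that the clamp macro as written compares (low) <= (high) rather than (a) <= (high), so values above the upper bound appear to pass through unclamped. A host-side setup sketch, assuming Real_t is float (the helper below is illustrative, not from the original source):

#include <cuda_runtime.h>
#include <algorithm>
#include <numeric>
#include <random>
#include <vector>

// Build the doubled 256-entry permutation table the kernels index into,
// and copy it to a 512-int device buffer allocated by the caller.
void make_permutation_table(int *d_perm)
{
    std::vector<int> p(256);
    std::iota(p.begin(), p.end(), 0);
    std::shuffle(p.begin(), p.end(), std::mt19937{1234});

    std::vector<int> doubled(512);
    for (int i = 0; i < 512; ++i) doubled[i] = p[i % 256];

    cudaMemcpy(d_perm, doubled.data(), 512 * sizeof(int), cudaMemcpyHostToDevice);
}

// One thread per texel of a width x height image:
//   dim3 block(16, 16);
//   dim3 grid((width + 15) / 16, (height + 15) / 16);
//   classicPerlin<<<grid, block>>>(d_vals, d_perm, amplitude, frequency, gain,
//                                  lacunarity, scale, increment, width, height, 1);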
c78a9ede1af6ac2823066bf27c2f9e771f9482ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/aggregation.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/aggregation/aggregation.cuh> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/groupby/sort_helper.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/rolling.hpp> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <rolling/rolling_detail.hpp> #include <rolling/rolling_jit_detail.hpp> #include <jit/launcher.h> #include <jit/parser.h> #include <jit/type.h> #include <rolling/jit/code/code.h> #include <bit.hpp.jit> #include <rolling_jit_detail.hpp.jit> #include <types.hpp.jit> #include <thrust/binary_search.h> #include <rmm/device_scalar.hpp> #include <memory> namespace cudf { namespace detail { namespace { // anonymous /** * @brief Only count operation is executed and count is updated * depending on `min_periods` and returns true if it was * valid, else false. */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<op == aggregation::COUNT_VALID || op == aggregation::COUNT_ALL, bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; for (size_type j = start_index; j < end_index; j++) { if (op == aggregation::COUNT_ALL || !has_nulls || input.is_valid(j)) { count++; } } bool output_is_valid = (count >= min_periods); output.element<OutputType>(current_index) = count; return output_is_valid; } /** * @brief Calculates row-number within [start_index, end_index). * Count is updated depending on `min_periods` * Returns true if it was valid, else false. */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<op == aggregation::ROW_NUMBER, bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { bool output_is_valid = ((end_index - start_index) >= min_periods); output.element<OutputType>(current_index) = ((current_index - start_index) + 1); return output_is_valid; } /** * @brief Only used for `string_view` type to get ARGMIN and ARGMAX, which * will be used to gather MIN and MAX. And returns true if the * operation was valid, else false. 
*/ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<(op == aggregation::ARGMIN or op == aggregation::ARGMAX) and std::is_same<InputType, cudf::string_view>::value, bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; InputType val = agg_op::template identity<InputType>(); OutputType val_index = (op == aggregation::ARGMIN) ? ARGMIN_SENTINEL : ARGMAX_SENTINEL; for (size_type j = start_index; j < end_index; j++) { if (!has_nulls || input.is_valid(j)) { InputType element = input.element<InputType>(j); val = agg_op{}(element, val); if (val == element) { val_index = j; } count++; } } bool output_is_valid = (count >= min_periods); // -1 will help identify null elements while gathering for Min and Max // In case of count, this would be null, so doesn't matter. output.element<OutputType>(current_index) = (output_is_valid) ? val_index : -1; // The gather mask shouldn't contain null values, so // always return zero return true; } /** * @brief Operates on only fixed-width types and returns true if the * operation was valid, else false. */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<!std::is_same<InputType, cudf::string_view>::value and !(op == aggregation::COUNT_VALID || op == aggregation::COUNT_ALL || op == aggregation::ROW_NUMBER), bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; OutputType val = agg_op::template identity<OutputType>(); for (size_type j = start_index; j < end_index; j++) { if (!has_nulls || input.is_valid(j)) { OutputType element = input.element<InputType>(j); val = agg_op{}(element, val); count++; } } bool output_is_valid = (count >= min_periods); // store the output value, one per thread cudf::detail::rolling_store_output_functor<OutputType, op == aggregation::MEAN>{}( output.element<OutputType>(current_index), val, count); return output_is_valid; } /** * @brief Computes the rolling window function * * @tparam InputType Datatype of `input` * @tparam OutputType Datatype of `output` * @tparam agg_op A functor that defines the aggregation operation * @tparam op The aggregation operator (enum value) * @tparam block_size CUDA block size for the kernel * @tparam has_nulls true if the input column has nulls * @tparam PrecedingWindowIterator iterator type (inferred) * @tparam FollowingWindowIterator iterator type (inferred) * @param input Input column device view * @param output Output column device view * @param preceding_window_begin[in] Rolling window size iterator, accumulates from * in_col[i-preceding_window] to in_col[i] inclusive * @param following_window_begin[in] Rolling window size iterator in the forward * direction, accumulates from in_col[i] to * in_col[i+following_window] inclusive * @param min_periods[in] Minimum number of observations in window required to * have a value, otherwise 0 is 
stored in the valid bit mask */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, int block_size, bool has_nulls, typename PrecedingWindowIterator, typename FollowingWindowIterator> __launch_bounds__(block_size) __global__ void gpu_rolling(column_device_view input, mutable_column_device_view output, size_type* __restrict__ output_valid_count, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods) { size_type i = blockIdx.x * block_size + threadIdx.x; size_type stride = block_size * gridDim.x; size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffffffff, i < input.size()); while (i < input.size()) { size_type preceding_window = preceding_window_begin[i]; size_type following_window = following_window_begin[i]; // compute bounds size_type start = min(input.size(), max(0, i - preceding_window + 1)); size_type end = min(input.size(), max(0, i + following_window + 1)); size_type start_index = min(start, end); size_type end_index = max(start, end); // aggregate // TODO: We should explore using shared memory to avoid redundant loads. // This might require separating the kernel into a special version // for dynamic and static sizes. volatile bool output_is_valid = false; output_is_valid = process_rolling_window<InputType, OutputType, agg_op, op, has_nulls>( input, output, start_index, end_index, i, min_periods); // set the mask cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)}; // only one thread writes the mask if (0 == threadIdx.x % cudf::detail::warp_size) { output.set_mask_word(cudf::word_index(i), result_mask); warp_valid_count += __popc(result_mask); } // process next element i += stride; active_threads = __ballot_sync(active_threads, i < input.size()); } // sum the valid counts across the whole block size_type block_valid_count = cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); } } template <typename InputType> struct rolling_window_launcher { template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> size_type kernel_launcher(column_view const& input, mutable_column_view& output, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, hipStream_t stream) { constexpr cudf::size_type block_size = 256; cudf::detail::grid_1d grid(input.size(), block_size); auto input_device_view = column_device_view::create(input, stream); auto output_device_view = mutable_column_device_view::create(output, stream); rmm::device_scalar<size_type> device_valid_count{0, stream}; if (input.has_nulls()) { hipLaunchKernelGGL(( gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, true>) , dim3(grid.num_blocks), dim3(block_size), 0, stream, *input_device_view, *output_device_view, device_valid_count.data(), preceding_window_begin, following_window_begin, min_periods); } else { hipLaunchKernelGGL(( gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, false>) , dim3(grid.num_blocks), dim3(block_size), 0, stream, *input_device_view, *output_device_view, device_valid_count.data(), preceding_window_begin, following_window_begin, min_periods); } size_type valid_count = device_valid_count.value(stream); // check the stream for debugging CHECK_CUDA(stream); return valid_count; 
} // This launch is only for fixed width columns with valid aggregation option // numeric: All // timestamp: MIN, MAX, COUNT_VALID, COUNT_ALL, ROW_NUMBER // string, dictionary, list : COUNT_VALID, COUNT_ALL, ROW_NUMBER template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<cudf::detail::is_rolling_supported<T, agg_op, op>() and !cudf::detail::is_rolling_string_specialization<T, agg_op, op>(), std::unique_ptr<column>> launch(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream) { if (input.is_empty()) return empty_like(input); auto output = make_fixed_width_column( target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr); cudf::mutable_column_view output_view = output->mutable_view(); auto valid_count = kernel_launcher<T, agg_op, op, PrecedingWindowIterator, FollowingWindowIterator>( input, output_view, preceding_window_begin, following_window_begin, min_periods, agg, stream); output->set_null_count(output->size() - valid_count); return output; } // This launch is only for string specializations // string: MIN, MAX template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<cudf::detail::is_rolling_string_specialization<T, agg_op, op>(), std::unique_ptr<column>> launch(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream) { if (input.is_empty()) return empty_like(input); auto output = make_numeric_column(cudf::data_type{cudf::type_to_id<size_type>()}, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); cudf::mutable_column_view output_view = output->mutable_view(); // Passing the agg_op and aggregation::Kind as constant to group them in pair, else it // evolves to error when try to use agg_op as compiler tries different combinations if (op == aggregation::MIN) { kernel_launcher<T, DeviceMin, aggregation::ARGMIN, PrecedingWindowIterator, FollowingWindowIterator>(input, output_view, preceding_window_begin, following_window_begin, min_periods, agg, stream); } else if (op == aggregation::MAX) { kernel_launcher<T, DeviceMax, aggregation::ARGMAX, PrecedingWindowIterator, FollowingWindowIterator>(input, output_view, preceding_window_begin, following_window_begin, min_periods, agg, stream); } else { CUDF_FAIL("MIN and MAX are the only supported aggregation types for string columns"); } // The rows that represent null elements will be having negative values in gather map, // and that's why nullify_out_of_bounds/ignore_out_of_bounds is true. 
auto output_table = detail::gather(table_view{{input}}, output->view(), detail::out_of_bounds_policy::IGNORE, detail::negative_index_policy::NOT_ALLOWED, mr, stream); return std::make_unique<cudf::column>(std::move(output_table->get_column(0))); } // Deals with invalid column and/or aggregation options template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<!cudf::detail::is_rolling_supported<T, agg_op, op>() and !cudf::detail::is_rolling_string_specialization<T, agg_op, op>(), std::unique_ptr<column>> launch(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_FAIL("Aggregation operator and/or input type combination is invalid"); } template <aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<!(op == aggregation::MEAN), std::unique_ptr<column>> operator()( column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream) { return launch<InputType, typename corresponding_operator<op>::type, op, PrecedingWindowIterator, FollowingWindowIterator>( input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } // This variant is just to handle mean template <aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<(op == aggregation::MEAN), std::unique_ptr<column>> operator()( column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream) { return launch<InputType, cudf::DeviceSum, op, PrecedingWindowIterator, FollowingWindowIterator>( input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } }; struct dispatch_rolling { template <typename T, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> operator()(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream) { return aggregation_dispatcher(agg->kind, rolling_window_launcher<T>{}, input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } }; } // namespace // Applies a user-defined rolling window function to the values in a column. 
template <typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> rolling_window_udf(column_view const& input, PrecedingWindowIterator preceding_window, std::string const& preceding_window_str, FollowingWindowIterator following_window, std::string const& following_window_str, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream = 0) { static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(), "bitmask_type size does not match CUDA warp size"); if (input.has_nulls()) CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls."); min_periods = ::max(min_periods, 0); auto udf_agg = static_cast<udf_aggregation*>(agg.get()); std::string hash = "prog_rolling." + std::to_string(std::hash<std::string>{}(udf_agg->_source)); std::string cuda_source; switch (udf_agg->kind) { case aggregation::Kind::PTX: cuda_source = cudf::rolling::jit::code::kernel_headers; cuda_source += cudf::jit::parse_single_function_ptx(udf_agg->_source, udf_agg->_function_name, cudf::jit::get_type_name(udf_agg->_output_type), {0, 5}); // args 0 and 5 are pointers. cuda_source += cudf::rolling::jit::code::kernel; break; case aggregation::Kind::CUDA: cuda_source = cudf::rolling::jit::code::kernel_headers; cuda_source += cudf::jit::parse_single_function_cuda(udf_agg->_source, udf_agg->_function_name); cuda_source += cudf::rolling::jit::code::kernel; break; default: CUDF_FAIL("Unsupported UDF type."); } std::unique_ptr<column> output = make_numeric_column( udf_agg->_output_type, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); auto output_view = output->mutable_view(); rmm::device_scalar<size_type> device_valid_count{0, stream}; const std::vector<std::string> compiler_flags{"-std=c++14", // Have jitify prune unused global variables "-remove-unused-globals", // suppress all NVRTC warnings "-w"}; // Launch the jitify kernel cudf::jit::launcher(hash, cuda_source, {cudf_types_hpp, cudf_utilities_bit_hpp, cudf::rolling::jit::code::operation_h, ___src_rolling_rolling_jit_detail_hpp}, compiler_flags, nullptr, stream) .set_kernel_inst("gpu_rolling_new", // name of the kernel we are launching {cudf::jit::get_type_name(input.type()), // list of template arguments cudf::jit::get_type_name(output->type()), udf_agg->_operator_name, preceding_window_str.c_str(), following_window_str.c_str()}) .launch(input.size(), cudf::jit::get_data_ptr(input), input.null_mask(), cudf::jit::get_data_ptr(output_view), output_view.null_mask(), device_valid_count.data(), preceding_window, following_window, min_periods); output->set_null_count(output->size() - device_valid_count.value(stream)); // check the stream for debugging CHECK_CUDA(stream); return output; } /** * @copydoc cudf::rolling_window(column_view const& input, * PrecedingWindowIterator preceding_window_begin, * FollowingWindowIterator following_window_begin, * size_type min_periods, * std::unique_ptr<aggregation> const& agg, * rmm::mr::device_memory_resource* mr) * * @param stream CUDA stream used for device memory operations and kernel launches. 
*/ template <typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> rolling_window(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, hipStream_t stream = 0) { static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(), "bitmask_type size does not match CUDA warp size"); min_periods = ::max(min_periods, 0); return cudf::type_dispatcher(input.type(), dispatch_rolling{}, input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } } // namespace detail // Applies a fixed-size rolling window function to the values in a column. std::unique_ptr<column> rolling_window(column_view const& input, size_type preceding_window, size_type following_window, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (input.size() == 0) return empty_like(input); CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative"); if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) { return cudf::detail::rolling_window_udf(input, preceding_window, "cudf::size_type", following_window, "cudf::size_type", min_periods, agg, mr, 0); } else { auto preceding_window_begin = thrust::make_constant_iterator(preceding_window); auto following_window_begin = thrust::make_constant_iterator(following_window); return cudf::detail::rolling_window( input, preceding_window_begin, following_window_begin, min_periods, agg, mr, 0); } } // Applies a variable-size rolling window function to the values in a column. std::unique_ptr<column> rolling_window(column_view const& input, column_view const& preceding_window, column_view const& following_window, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (preceding_window.size() == 0 || following_window.size() == 0 || input.size() == 0) return empty_like(input); CUDF_EXPECTS(preceding_window.type().id() == type_id::INT32 && following_window.type().id() == type_id::INT32, "preceding_window/following_window must have type_id::INT32 type"); CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(), "preceding_window/following_window size must match input size"); if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) { return cudf::detail::rolling_window_udf(input, preceding_window.begin<size_type>(), "cudf::size_type*", following_window.begin<size_type>(), "cudf::size_type*", min_periods, agg, mr, 0); } else { return cudf::detail::rolling_window(input, preceding_window.begin<size_type>(), following_window.begin<size_type>(), min_periods, agg, mr, 0); } } std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys, column_view const& input, size_type preceding_window, size_type following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (input.size() == 0) return empty_like(input); CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()), "Size mismatch between group_keys and input vector."); CUDF_EXPECTS((min_periods > 0), "min_periods must be positive"); if (group_keys.num_columns() == 0) { // No Groupby columns specified. Treat as one big group. 
return rolling_window(input, preceding_window, following_window, min_periods, aggr, mr); } using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper; sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES}; auto group_offsets{helper.group_offsets()}; auto const& group_labels{helper.group_labels()}; // `group_offsets` are interpreted in adjacent pairs, each pair representing the offsets // of the first, and one past the last elements in a group. // // If `group_offsets` is not empty, it must contain at least two offsets: // a. 0, indicating the first element in `input` // b. input.size(), indicating one past the last element in `input`. // // Thus, for an input of 1000 rows, // 0. [] indicates a single group, spanning the entire column. // 1 [10] is invalid. // 2. [0, 1000] indicates a single group, spanning the entire column (thus, equivalent to no // groups.) // 3. [0, 500, 1000] indicates two equal-sized groups: [0,500), and [500,1000). assert(group_offsets.size() >= 2 && group_offsets[0] == 0 && group_offsets[group_offsets.size() - 1] == input.size() && "Must have at least one group."); auto preceding_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), preceding_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_start = d_group_offsets[group_label]; return thrust::minimum<size_type>{}(preceding_window, idx - group_start + 1); // Preceding includes current row. }; auto following_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), following_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets is capped with `input.size()`. return thrust::minimum<size_type>{}(following_window, (group_end - 1) - idx); }; if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) { cudf::detail::preceding_window_wrapper grouped_preceding_window{ group_offsets.data().get(), group_labels.data().get(), preceding_window}; cudf::detail::following_window_wrapper grouped_following_window{ group_offsets.data().get(), group_labels.data().get(), following_window}; return cudf::detail::rolling_window_udf(input, grouped_preceding_window, "cudf::detail::preceding_window_wrapper", grouped_following_window, "cudf::detail::following_window_wrapper", min_periods, aggr, mr, 0); } else { return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr, 0); } } namespace { bool is_supported_range_frame_unit(cudf::data_type const& data_type) { auto id = data_type.id(); return id == cudf::type_id::TIMESTAMP_DAYS || id == cudf::type_id::TIMESTAMP_SECONDS || id == cudf::type_id::TIMESTAMP_MILLISECONDS || id == cudf::type_id::TIMESTAMP_MICROSECONDS || id == cudf::type_id::TIMESTAMP_NANOSECONDS; } /// Fetches multiplication factor to normalize window sizes, depending on the datatype of the /// timestamp column. Used for time-based rolling-window operations. E.g. If the timestamp column is /// in TIMESTAMP_SECONDS, and the window sizes are specified in DAYS, the window size needs to be /// multiplied by `24*60*60`, before comparisons with the timestamps. 
size_t multiplication_factor(cudf::data_type const& data_type) { // Assume timestamps. switch (data_type.id()) { case cudf::type_id::TIMESTAMP_DAYS: return 1L; case cudf::type_id::TIMESTAMP_SECONDS: return 24L * 60 * 60; case cudf::type_id::TIMESTAMP_MILLISECONDS: return 24L * 60 * 60 * 1000; case cudf::type_id::TIMESTAMP_MICROSECONDS: return 24L * 60 * 60 * 1000 * 1000; default: CUDF_EXPECTS(data_type.id() == cudf::type_id::TIMESTAMP_NANOSECONDS, "Unexpected data-type for timestamp-based rolling window operation!"); return 24L * 60 * 60 * 1000 * 1000 * 1000; } } // Time-range window computation, with // 1. no grouping keys specified // 2. timetamps in ASCENDING order. // Treat as one single group. template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_ASC(column_view const& input, column_view const& timestamp_column, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_start = 0; auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, lowest_timestamp_in_window)) + 1; // Add 1, for `preceding` to account for current row. }; auto following_calculator = [num_rows = input.size(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_end = num_rows; auto highest_timestamp_in_window = d_timestamps[idx] + following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, highest_timestamp_in_window) - (d_timestamps + idx)) - 1; }; return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr); } // Time-range window computation, for timestamps in ASCENDING order. template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_ASC( column_view const& input, column_view const& timestamp_column, rmm::device_vector<cudf::size_type> const& group_offsets, rmm::device_vector<cudf::size_type> const& group_labels, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_start = d_group_offsets[group_label]; auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, lowest_timestamp_in_window)) + 1; // Add 1, for `preceding` to account for current row. 
}; auto following_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets is capped with `input.size()`. auto highest_timestamp_in_window = d_timestamps[idx] + following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, highest_timestamp_in_window) - (d_timestamps + idx)) - 1; }; return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr); } // Time-range window computation, with // 1. no grouping keys specified // 2. timetamps in DESCENDING order. // Treat as one single group. template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_DESC(column_view const& input, column_view const& timestamp_column, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_start = 0; auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, highest_timestamp_in_window, thrust::greater<decltype(highest_timestamp_in_window)>())) + 1; // Add 1, for `preceding` to account for current row. }; auto following_calculator = [num_rows = input.size(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_end = num_rows; // Cannot fall off the end, since offsets is capped with `input.size()`. auto lowest_timestamp_in_window = d_timestamps[idx] - following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, lowest_timestamp_in_window, thrust::greater<decltype(lowest_timestamp_in_window)>()) - (d_timestamps + idx)) - 1; }; return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr); } // Time-range window computation, for timestamps in DESCENDING order. 
template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_DESC( column_view const& input, column_view const& timestamp_column, rmm::device_vector<cudf::size_type> const& group_offsets, rmm::device_vector<cudf::size_type> const& group_labels, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_start = d_group_offsets[group_label]; auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, highest_timestamp_in_window, thrust::greater<decltype(highest_timestamp_in_window)>())) + 1; // Add 1, for `preceding` to account for current row. }; auto following_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets is capped with `input.size()`. auto lowest_timestamp_in_window = d_timestamps[idx] - following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, lowest_timestamp_in_window, thrust::greater<decltype(lowest_timestamp_in_window)>()) - (d_timestamps + idx)) - 1; }; if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) { CUDF_FAIL("Time ranged rolling window does NOT (yet) support UDF."); } else { return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr, 0); } } template <typename TimestampImpl_t> std::unique_ptr<column> grouped_time_range_rolling_window_impl( column_view const& input, column_view const& timestamp_column, cudf::order const& timestamp_ordering, rmm::device_vector<cudf::size_type> const& group_offsets, rmm::device_vector<cudf::size_type> const& group_labels, size_type preceding_window_in_days, // TODO: Consider taking offset-type as type_id. Assumes days // for now. size_type following_window_in_days, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { TimestampImpl_t mult_factor{ static_cast<TimestampImpl_t>(multiplication_factor(timestamp_column.type()))}; if (timestamp_ordering == cudf::order::ASCENDING) { return (group_offsets.size() == 0) ? time_range_window_ASC(input, timestamp_column, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr) : time_range_window_ASC(input, timestamp_column, group_offsets, group_labels, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr); } else { return (group_offsets.size() == 0) ? 
time_range_window_DESC(input, timestamp_column, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr) : time_range_window_DESC(input, timestamp_column, group_offsets, group_labels, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr); } } } // namespace std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys, column_view const& timestamp_column, cudf::order const& timestamp_order, column_view const& input, size_type preceding_window_in_days, size_type following_window_in_days, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (input.size() == 0) return empty_like(input); CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()), "Size mismatch between group_keys and input vector."); CUDF_EXPECTS((min_periods > 0), "min_periods must be positive"); using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper; using index_vector = sort_groupby_helper::index_vector; index_vector group_offsets, group_labels; if (group_keys.num_columns() > 0) { sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES}; group_offsets = helper.group_offsets(); group_labels = helper.group_labels(); } // Assumes that `timestamp_column` is actually of a timestamp type. CUDF_EXPECTS(is_supported_range_frame_unit(timestamp_column.type()), "Unsupported data-type for `timestamp`-based rolling window operation!"); return timestamp_column.type().id() == cudf::type_id::TIMESTAMP_DAYS ? grouped_time_range_rolling_window_impl<int32_t>(input, timestamp_column, timestamp_order, group_offsets, group_labels, preceding_window_in_days, following_window_in_days, min_periods, aggr, mr) : grouped_time_range_rolling_window_impl<int64_t>(input, timestamp_column, timestamp_order, group_offsets, group_labels, preceding_window_in_days, following_window_in_days, min_periods, aggr, mr); } } // namespace cudf
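// A minimal host-side sketch of the window-bound arithmetic that the grouped rolling-window
// path above implements in its preceding/following calculator lambdas. The toy offsets/labels,
// the helper names, and the main() driver below are illustrative only (not part of the library);
// the clamping expressions themselves are taken directly from the code above.
#include <algorithm>
#include <cstdio>
#include <vector>

// Clamp the requested window so it never reaches past the boundaries of the group
// that row `idx` belongs to (preceding includes the current row).
static int preceding_for_row(int idx, int preceding_window,
                             const std::vector<int>& offsets,
                             const std::vector<int>& labels)
{
  int group_start = offsets[labels[idx]];
  return std::min(preceding_window, idx - group_start + 1);
}

static int following_for_row(int idx, int following_window,
                             const std::vector<int>& offsets,
                             const std::vector<int>& labels)
{
  int group_end = offsets[labels[idx] + 1];  // one past the last row of this group
  return std::min(following_window, (group_end - 1) - idx);
}

int main()
{
  // Two groups over 6 rows, [0,4) and [4,6), following the offsets/labels convention
  // described in the comments above (offsets start at 0 and end at input.size()).
  std::vector<int> offsets{0, 4, 6};
  std::vector<int> labels{0, 0, 0, 0, 1, 1};

  for (int idx = 0; idx < 6; ++idx) {
    std::printf("row %d: preceding=%d following=%d\n", idx,
                preceding_for_row(idx, /*preceding_window=*/3, offsets, labels),
                following_for_row(idx, /*following_window=*/2, offsets, labels));
  }
  return 0;
}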
c78a9ede1af6ac2823066bf27c2f9e771f9482ea.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/aggregation.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/aggregation/aggregation.cuh> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/groupby/sort_helper.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/rolling.hpp> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <rolling/rolling_detail.hpp> #include <rolling/rolling_jit_detail.hpp> #include <jit/launcher.h> #include <jit/parser.h> #include <jit/type.h> #include <rolling/jit/code/code.h> #include <bit.hpp.jit> #include <rolling_jit_detail.hpp.jit> #include <types.hpp.jit> #include <thrust/binary_search.h> #include <rmm/device_scalar.hpp> #include <memory> namespace cudf { namespace detail { namespace { // anonymous /** * @brief Only count operation is executed and count is updated * depending on `min_periods` and returns true if it was * valid, else false. */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<op == aggregation::COUNT_VALID || op == aggregation::COUNT_ALL, bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; for (size_type j = start_index; j < end_index; j++) { if (op == aggregation::COUNT_ALL || !has_nulls || input.is_valid(j)) { count++; } } bool output_is_valid = (count >= min_periods); output.element<OutputType>(current_index) = count; return output_is_valid; } /** * @brief Calculates row-number within [start_index, end_index). * Count is updated depending on `min_periods` * Returns true if it was valid, else false. */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<op == aggregation::ROW_NUMBER, bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { bool output_is_valid = ((end_index - start_index) >= min_periods); output.element<OutputType>(current_index) = ((current_index - start_index) + 1); return output_is_valid; } /** * @brief Only used for `string_view` type to get ARGMIN and ARGMAX, which * will be used to gather MIN and MAX. And returns true if the * operation was valid, else false. 
*/ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<(op == aggregation::ARGMIN or op == aggregation::ARGMAX) and std::is_same<InputType, cudf::string_view>::value, bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; InputType val = agg_op::template identity<InputType>(); OutputType val_index = (op == aggregation::ARGMIN) ? ARGMIN_SENTINEL : ARGMAX_SENTINEL; for (size_type j = start_index; j < end_index; j++) { if (!has_nulls || input.is_valid(j)) { InputType element = input.element<InputType>(j); val = agg_op{}(element, val); if (val == element) { val_index = j; } count++; } } bool output_is_valid = (count >= min_periods); // -1 will help identify null elements while gathering for Min and Max // In case of count, this would be null, so doesn't matter. output.element<OutputType>(current_index) = (output_is_valid) ? val_index : -1; // The gather mask shouldn't contain null values, so // always return zero return true; } /** * @brief Operates on only fixed-width types and returns true if the * operation was valid, else false. */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, bool has_nulls> std::enable_if_t<!std::is_same<InputType, cudf::string_view>::value and !(op == aggregation::COUNT_VALID || op == aggregation::COUNT_ALL || op == aggregation::ROW_NUMBER), bool> __device__ process_rolling_window(column_device_view input, mutable_column_device_view output, size_type start_index, size_type end_index, size_type current_index, size_type min_periods) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; OutputType val = agg_op::template identity<OutputType>(); for (size_type j = start_index; j < end_index; j++) { if (!has_nulls || input.is_valid(j)) { OutputType element = input.element<InputType>(j); val = agg_op{}(element, val); count++; } } bool output_is_valid = (count >= min_periods); // store the output value, one per thread cudf::detail::rolling_store_output_functor<OutputType, op == aggregation::MEAN>{}( output.element<OutputType>(current_index), val, count); return output_is_valid; } /** * @brief Computes the rolling window function * * @tparam InputType Datatype of `input` * @tparam OutputType Datatype of `output` * @tparam agg_op A functor that defines the aggregation operation * @tparam op The aggregation operator (enum value) * @tparam block_size CUDA block size for the kernel * @tparam has_nulls true if the input column has nulls * @tparam PrecedingWindowIterator iterator type (inferred) * @tparam FollowingWindowIterator iterator type (inferred) * @param input Input column device view * @param output Output column device view * @param preceding_window_begin[in] Rolling window size iterator, accumulates from * in_col[i-preceding_window] to in_col[i] inclusive * @param following_window_begin[in] Rolling window size iterator in the forward * direction, accumulates from in_col[i] to * in_col[i+following_window] inclusive * @param min_periods[in] Minimum number of observations in window required to * have a value, otherwise 0 is 
stored in the valid bit mask */ template <typename InputType, typename OutputType, typename agg_op, aggregation::Kind op, int block_size, bool has_nulls, typename PrecedingWindowIterator, typename FollowingWindowIterator> __launch_bounds__(block_size) __global__ void gpu_rolling(column_device_view input, mutable_column_device_view output, size_type* __restrict__ output_valid_count, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods) { size_type i = blockIdx.x * block_size + threadIdx.x; size_type stride = block_size * gridDim.x; size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffffffff, i < input.size()); while (i < input.size()) { size_type preceding_window = preceding_window_begin[i]; size_type following_window = following_window_begin[i]; // compute bounds size_type start = min(input.size(), max(0, i - preceding_window + 1)); size_type end = min(input.size(), max(0, i + following_window + 1)); size_type start_index = min(start, end); size_type end_index = max(start, end); // aggregate // TODO: We should explore using shared memory to avoid redundant loads. // This might require separating the kernel into a special version // for dynamic and static sizes. volatile bool output_is_valid = false; output_is_valid = process_rolling_window<InputType, OutputType, agg_op, op, has_nulls>( input, output, start_index, end_index, i, min_periods); // set the mask cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)}; // only one thread writes the mask if (0 == threadIdx.x % cudf::detail::warp_size) { output.set_mask_word(cudf::word_index(i), result_mask); warp_valid_count += __popc(result_mask); } // process next element i += stride; active_threads = __ballot_sync(active_threads, i < input.size()); } // sum the valid counts across the whole block size_type block_valid_count = cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); } } template <typename InputType> struct rolling_window_launcher { template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> size_type kernel_launcher(column_view const& input, mutable_column_view& output, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, cudaStream_t stream) { constexpr cudf::size_type block_size = 256; cudf::detail::grid_1d grid(input.size(), block_size); auto input_device_view = column_device_view::create(input, stream); auto output_device_view = mutable_column_device_view::create(output, stream); rmm::device_scalar<size_type> device_valid_count{0, stream}; if (input.has_nulls()) { gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, true> <<<grid.num_blocks, block_size, 0, stream>>>(*input_device_view, *output_device_view, device_valid_count.data(), preceding_window_begin, following_window_begin, min_periods); } else { gpu_rolling<T, target_type_t<InputType, op>, agg_op, op, block_size, false> <<<grid.num_blocks, block_size, 0, stream>>>(*input_device_view, *output_device_view, device_valid_count.data(), preceding_window_begin, following_window_begin, min_periods); } size_type valid_count = device_valid_count.value(stream); // check the stream for debugging CHECK_CUDA(stream); return valid_count; } // This launch is only for fixed width columns with valid 
aggregation option // numeric: All // timestamp: MIN, MAX, COUNT_VALID, COUNT_ALL, ROW_NUMBER // string, dictionary, list : COUNT_VALID, COUNT_ALL, ROW_NUMBER template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<cudf::detail::is_rolling_supported<T, agg_op, op>() and !cudf::detail::is_rolling_string_specialization<T, agg_op, op>(), std::unique_ptr<column>> launch(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { if (input.is_empty()) return empty_like(input); auto output = make_fixed_width_column( target_type(input.type(), op), input.size(), mask_state::UNINITIALIZED, stream, mr); cudf::mutable_column_view output_view = output->mutable_view(); auto valid_count = kernel_launcher<T, agg_op, op, PrecedingWindowIterator, FollowingWindowIterator>( input, output_view, preceding_window_begin, following_window_begin, min_periods, agg, stream); output->set_null_count(output->size() - valid_count); return output; } // This launch is only for string specializations // string: MIN, MAX template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<cudf::detail::is_rolling_string_specialization<T, agg_op, op>(), std::unique_ptr<column>> launch(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { if (input.is_empty()) return empty_like(input); auto output = make_numeric_column(cudf::data_type{cudf::type_to_id<size_type>()}, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); cudf::mutable_column_view output_view = output->mutable_view(); // Passing the agg_op and aggregation::Kind as constant to group them in pair, else it // evolves to error when try to use agg_op as compiler tries different combinations if (op == aggregation::MIN) { kernel_launcher<T, DeviceMin, aggregation::ARGMIN, PrecedingWindowIterator, FollowingWindowIterator>(input, output_view, preceding_window_begin, following_window_begin, min_periods, agg, stream); } else if (op == aggregation::MAX) { kernel_launcher<T, DeviceMax, aggregation::ARGMAX, PrecedingWindowIterator, FollowingWindowIterator>(input, output_view, preceding_window_begin, following_window_begin, min_periods, agg, stream); } else { CUDF_FAIL("MIN and MAX are the only supported aggregation types for string columns"); } // The rows that represent null elements will be having negative values in gather map, // and that's why nullify_out_of_bounds/ignore_out_of_bounds is true. 
auto output_table = detail::gather(table_view{{input}}, output->view(), detail::out_of_bounds_policy::IGNORE, detail::negative_index_policy::NOT_ALLOWED, mr, stream); return std::make_unique<cudf::column>(std::move(output_table->get_column(0))); } // Deals with invalid column and/or aggregation options template <typename T, typename agg_op, aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<!cudf::detail::is_rolling_supported<T, agg_op, op>() and !cudf::detail::is_rolling_string_specialization<T, agg_op, op>(), std::unique_ptr<column>> launch(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_FAIL("Aggregation operator and/or input type combination is invalid"); } template <aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<!(op == aggregation::MEAN), std::unique_ptr<column>> operator()( column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { return launch<InputType, typename corresponding_operator<op>::type, op, PrecedingWindowIterator, FollowingWindowIterator>( input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } // This variant is just to handle mean template <aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<(op == aggregation::MEAN), std::unique_ptr<column>> operator()( column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { return launch<InputType, cudf::DeviceSum, op, PrecedingWindowIterator, FollowingWindowIterator>( input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } }; struct dispatch_rolling { template <typename T, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> operator()(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { return aggregation_dispatcher(agg->kind, rolling_window_launcher<T>{}, input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } }; } // namespace // Applies a user-defined rolling window function to the values in a column. 
template <typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> rolling_window_udf(column_view const& input, PrecedingWindowIterator preceding_window, std::string const& preceding_window_str, FollowingWindowIterator following_window, std::string const& following_window_str, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream = 0) { static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(), "bitmask_type size does not match CUDA warp size"); if (input.has_nulls()) CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls."); min_periods = std::max(min_periods, 0); auto udf_agg = static_cast<udf_aggregation*>(agg.get()); std::string hash = "prog_rolling." + std::to_string(std::hash<std::string>{}(udf_agg->_source)); std::string cuda_source; switch (udf_agg->kind) { case aggregation::Kind::PTX: cuda_source = cudf::rolling::jit::code::kernel_headers; cuda_source += cudf::jit::parse_single_function_ptx(udf_agg->_source, udf_agg->_function_name, cudf::jit::get_type_name(udf_agg->_output_type), {0, 5}); // args 0 and 5 are pointers. cuda_source += cudf::rolling::jit::code::kernel; break; case aggregation::Kind::CUDA: cuda_source = cudf::rolling::jit::code::kernel_headers; cuda_source += cudf::jit::parse_single_function_cuda(udf_agg->_source, udf_agg->_function_name); cuda_source += cudf::rolling::jit::code::kernel; break; default: CUDF_FAIL("Unsupported UDF type."); } std::unique_ptr<column> output = make_numeric_column( udf_agg->_output_type, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); auto output_view = output->mutable_view(); rmm::device_scalar<size_type> device_valid_count{0, stream}; const std::vector<std::string> compiler_flags{"-std=c++14", // Have jitify prune unused global variables "-remove-unused-globals", // suppress all NVRTC warnings "-w"}; // Launch the jitify kernel cudf::jit::launcher(hash, cuda_source, {cudf_types_hpp, cudf_utilities_bit_hpp, cudf::rolling::jit::code::operation_h, ___src_rolling_rolling_jit_detail_hpp}, compiler_flags, nullptr, stream) .set_kernel_inst("gpu_rolling_new", // name of the kernel we are launching {cudf::jit::get_type_name(input.type()), // list of template arguments cudf::jit::get_type_name(output->type()), udf_agg->_operator_name, preceding_window_str.c_str(), following_window_str.c_str()}) .launch(input.size(), cudf::jit::get_data_ptr(input), input.null_mask(), cudf::jit::get_data_ptr(output_view), output_view.null_mask(), device_valid_count.data(), preceding_window, following_window, min_periods); output->set_null_count(output->size() - device_valid_count.value(stream)); // check the stream for debugging CHECK_CUDA(stream); return output; } /** * @copydoc cudf::rolling_window(column_view const& input, * PrecedingWindowIterator preceding_window_begin, * FollowingWindowIterator following_window_begin, * size_type min_periods, * std::unique_ptr<aggregation> const& agg, * rmm::mr::device_memory_resource* mr) * * @param stream CUDA stream used for device memory operations and kernel launches. 
*/ template <typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> rolling_window(column_view const& input, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr, cudaStream_t stream = 0) { static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(), "bitmask_type size does not match CUDA warp size"); min_periods = std::max(min_periods, 0); return cudf::type_dispatcher(input.type(), dispatch_rolling{}, input, preceding_window_begin, following_window_begin, min_periods, agg, mr, stream); } } // namespace detail // Applies a fixed-size rolling window function to the values in a column. std::unique_ptr<column> rolling_window(column_view const& input, size_type preceding_window, size_type following_window, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (input.size() == 0) return empty_like(input); CUDF_EXPECTS((min_periods >= 0), "min_periods must be non-negative"); if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) { return cudf::detail::rolling_window_udf(input, preceding_window, "cudf::size_type", following_window, "cudf::size_type", min_periods, agg, mr, 0); } else { auto preceding_window_begin = thrust::make_constant_iterator(preceding_window); auto following_window_begin = thrust::make_constant_iterator(following_window); return cudf::detail::rolling_window( input, preceding_window_begin, following_window_begin, min_periods, agg, mr, 0); } } // Applies a variable-size rolling window function to the values in a column. std::unique_ptr<column> rolling_window(column_view const& input, column_view const& preceding_window, column_view const& following_window, size_type min_periods, std::unique_ptr<aggregation> const& agg, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (preceding_window.size() == 0 || following_window.size() == 0 || input.size() == 0) return empty_like(input); CUDF_EXPECTS(preceding_window.type().id() == type_id::INT32 && following_window.type().id() == type_id::INT32, "preceding_window/following_window must have type_id::INT32 type"); CUDF_EXPECTS(preceding_window.size() == input.size() && following_window.size() == input.size(), "preceding_window/following_window size must match input size"); if (agg->kind == aggregation::CUDA || agg->kind == aggregation::PTX) { return cudf::detail::rolling_window_udf(input, preceding_window.begin<size_type>(), "cudf::size_type*", following_window.begin<size_type>(), "cudf::size_type*", min_periods, agg, mr, 0); } else { return cudf::detail::rolling_window(input, preceding_window.begin<size_type>(), following_window.begin<size_type>(), min_periods, agg, mr, 0); } } std::unique_ptr<column> grouped_rolling_window(table_view const& group_keys, column_view const& input, size_type preceding_window, size_type following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (input.size() == 0) return empty_like(input); CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()), "Size mismatch between group_keys and input vector."); CUDF_EXPECTS((min_periods > 0), "min_periods must be positive"); if (group_keys.num_columns() == 0) { // No Groupby columns specified. Treat as one big group. 
return rolling_window(input, preceding_window, following_window, min_periods, aggr, mr); } using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper; sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES}; auto group_offsets{helper.group_offsets()}; auto const& group_labels{helper.group_labels()}; // `group_offsets` are interpreted in adjacent pairs, each pair representing the offsets // of the first, and one past the last elements in a group. // // If `group_offsets` is not empty, it must contain at least two offsets: // a. 0, indicating the first element in `input` // b. input.size(), indicating one past the last element in `input`. // // Thus, for an input of 1000 rows, // 0. [] indicates a single group, spanning the entire column. // 1 [10] is invalid. // 2. [0, 1000] indicates a single group, spanning the entire column (thus, equivalent to no // groups.) // 3. [0, 500, 1000] indicates two equal-sized groups: [0,500), and [500,1000). assert(group_offsets.size() >= 2 && group_offsets[0] == 0 && group_offsets[group_offsets.size() - 1] == input.size() && "Must have at least one group."); auto preceding_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), preceding_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_start = d_group_offsets[group_label]; return thrust::minimum<size_type>{}(preceding_window, idx - group_start + 1); // Preceding includes current row. }; auto following_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), following_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets is capped with `input.size()`. return thrust::minimum<size_type>{}(following_window, (group_end - 1) - idx); }; if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) { cudf::detail::preceding_window_wrapper grouped_preceding_window{ group_offsets.data().get(), group_labels.data().get(), preceding_window}; cudf::detail::following_window_wrapper grouped_following_window{ group_offsets.data().get(), group_labels.data().get(), following_window}; return cudf::detail::rolling_window_udf(input, grouped_preceding_window, "cudf::detail::preceding_window_wrapper", grouped_following_window, "cudf::detail::following_window_wrapper", min_periods, aggr, mr, 0); } else { return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr, 0); } } namespace { bool is_supported_range_frame_unit(cudf::data_type const& data_type) { auto id = data_type.id(); return id == cudf::type_id::TIMESTAMP_DAYS || id == cudf::type_id::TIMESTAMP_SECONDS || id == cudf::type_id::TIMESTAMP_MILLISECONDS || id == cudf::type_id::TIMESTAMP_MICROSECONDS || id == cudf::type_id::TIMESTAMP_NANOSECONDS; } /// Fetches multiplication factor to normalize window sizes, depending on the datatype of the /// timestamp column. Used for time-based rolling-window operations. E.g. If the timestamp column is /// in TIMESTAMP_SECONDS, and the window sizes are specified in DAYS, the window size needs to be /// multiplied by `24*60*60`, before comparisons with the timestamps. 
size_t multiplication_factor(cudf::data_type const& data_type) { // Assume timestamps. switch (data_type.id()) { case cudf::type_id::TIMESTAMP_DAYS: return 1L; case cudf::type_id::TIMESTAMP_SECONDS: return 24L * 60 * 60; case cudf::type_id::TIMESTAMP_MILLISECONDS: return 24L * 60 * 60 * 1000; case cudf::type_id::TIMESTAMP_MICROSECONDS: return 24L * 60 * 60 * 1000 * 1000; default: CUDF_EXPECTS(data_type.id() == cudf::type_id::TIMESTAMP_NANOSECONDS, "Unexpected data-type for timestamp-based rolling window operation!"); return 24L * 60 * 60 * 1000 * 1000 * 1000; } } // Time-range window computation, with // 1. no grouping keys specified // 2. timetamps in ASCENDING order. // Treat as one single group. template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_ASC(column_view const& input, column_view const& timestamp_column, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_start = 0; auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, lowest_timestamp_in_window)) + 1; // Add 1, for `preceding` to account for current row. }; auto following_calculator = [num_rows = input.size(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_end = num_rows; auto highest_timestamp_in_window = d_timestamps[idx] + following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, highest_timestamp_in_window) - (d_timestamps + idx)) - 1; }; return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr); } // Time-range window computation, for timestamps in ASCENDING order. template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_ASC( column_view const& input, column_view const& timestamp_column, rmm::device_vector<cudf::size_type> const& group_offsets, rmm::device_vector<cudf::size_type> const& group_labels, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_start = d_group_offsets[group_label]; auto lowest_timestamp_in_window = d_timestamps[idx] - preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, lowest_timestamp_in_window)) + 1; // Add 1, for `preceding` to account for current row. 
}; auto following_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets is capped with `input.size()`. auto highest_timestamp_in_window = d_timestamps[idx] + following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, highest_timestamp_in_window) - (d_timestamps + idx)) - 1; }; return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr); } // Time-range window computation, with // 1. no grouping keys specified // 2. timetamps in DESCENDING order. // Treat as one single group. template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_DESC(column_view const& input, column_view const& timestamp_column, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_start = 0; auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, highest_timestamp_in_window, thrust::greater<decltype(highest_timestamp_in_window)>())) + 1; // Add 1, for `preceding` to account for current row. }; auto following_calculator = [num_rows = input.size(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_end = num_rows; // Cannot fall off the end, since offsets is capped with `input.size()`. auto lowest_timestamp_in_window = d_timestamps[idx] - following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, lowest_timestamp_in_window, thrust::greater<decltype(lowest_timestamp_in_window)>()) - (d_timestamps + idx)) - 1; }; return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr); } // Time-range window computation, for timestamps in DESCENDING order. 
template <typename TimestampImpl_t> std::unique_ptr<column> time_range_window_DESC( column_view const& input, column_view const& timestamp_column, rmm::device_vector<cudf::size_type> const& group_offsets, rmm::device_vector<cudf::size_type> const& group_labels, TimestampImpl_t preceding_window, TimestampImpl_t following_window, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { auto preceding_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), preceding_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_start = d_group_offsets[group_label]; auto highest_timestamp_in_window = d_timestamps[idx] + preceding_window; return ((d_timestamps + idx) - thrust::lower_bound(thrust::seq, d_timestamps + group_start, d_timestamps + idx, highest_timestamp_in_window, thrust::greater<decltype(highest_timestamp_in_window)>())) + 1; // Add 1, for `preceding` to account for current row. }; auto following_calculator = [d_group_offsets = group_offsets.data().get(), d_group_labels = group_labels.data().get(), d_timestamps = timestamp_column.data<TimestampImpl_t>(), following_window] __device__(size_type idx) { auto group_label = d_group_labels[idx]; auto group_end = d_group_offsets[group_label + 1]; // Cannot fall off the end, since offsets is capped with `input.size()`. auto lowest_timestamp_in_window = d_timestamps[idx] - following_window; return (thrust::upper_bound(thrust::seq, d_timestamps + idx, d_timestamps + group_end, lowest_timestamp_in_window, thrust::greater<decltype(lowest_timestamp_in_window)>()) - (d_timestamps + idx)) - 1; }; if (aggr->kind == aggregation::CUDA || aggr->kind == aggregation::PTX) { CUDF_FAIL("Time ranged rolling window does NOT (yet) support UDF."); } else { return cudf::detail::rolling_window( input, thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), preceding_calculator), thrust::make_transform_iterator(thrust::make_counting_iterator<size_type>(0), following_calculator), min_periods, aggr, mr, 0); } } template <typename TimestampImpl_t> std::unique_ptr<column> grouped_time_range_rolling_window_impl( column_view const& input, column_view const& timestamp_column, cudf::order const& timestamp_ordering, rmm::device_vector<cudf::size_type> const& group_offsets, rmm::device_vector<cudf::size_type> const& group_labels, size_type preceding_window_in_days, // TODO: Consider taking offset-type as type_id. Assumes days // for now. size_type following_window_in_days, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { TimestampImpl_t mult_factor{ static_cast<TimestampImpl_t>(multiplication_factor(timestamp_column.type()))}; if (timestamp_ordering == cudf::order::ASCENDING) { return (group_offsets.size() == 0) ? time_range_window_ASC(input, timestamp_column, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr) : time_range_window_ASC(input, timestamp_column, group_offsets, group_labels, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr); } else { return (group_offsets.size() == 0) ? 
time_range_window_DESC(input, timestamp_column, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr) : time_range_window_DESC(input, timestamp_column, group_offsets, group_labels, preceding_window_in_days * mult_factor, following_window_in_days * mult_factor, min_periods, aggr, mr); } } } // namespace std::unique_ptr<column> grouped_time_range_rolling_window(table_view const& group_keys, column_view const& timestamp_column, cudf::order const& timestamp_order, column_view const& input, size_type preceding_window_in_days, size_type following_window_in_days, size_type min_periods, std::unique_ptr<aggregation> const& aggr, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); if (input.size() == 0) return empty_like(input); CUDF_EXPECTS((group_keys.num_columns() == 0 || group_keys.num_rows() == input.size()), "Size mismatch between group_keys and input vector."); CUDF_EXPECTS((min_periods > 0), "min_periods must be positive"); using sort_groupby_helper = cudf::groupby::detail::sort::sort_groupby_helper; using index_vector = sort_groupby_helper::index_vector; index_vector group_offsets, group_labels; if (group_keys.num_columns() > 0) { sort_groupby_helper helper{group_keys, cudf::null_policy::INCLUDE, cudf::sorted::YES}; group_offsets = helper.group_offsets(); group_labels = helper.group_labels(); } // Assumes that `timestamp_column` is actually of a timestamp type. CUDF_EXPECTS(is_supported_range_frame_unit(timestamp_column.type()), "Unsupported data-type for `timestamp`-based rolling window operation!"); return timestamp_column.type().id() == cudf::type_id::TIMESTAMP_DAYS ? grouped_time_range_rolling_window_impl<int32_t>(input, timestamp_column, timestamp_order, group_offsets, group_labels, preceding_window_in_days, following_window_in_days, min_periods, aggr, mr) : grouped_time_range_rolling_window_impl<int64_t>(input, timestamp_column, timestamp_order, group_offsets, group_labels, preceding_window_in_days, following_window_in_days, min_periods, aggr, mr); } } // namespace cudf
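// A minimal host-side sketch of the bound arithmetic used by time_range_window_ASC above,
// written with std::lower_bound/std::upper_bound in place of the thrust::seq device calls.
// The timestamps and window sizes in main() are hypothetical; the point is only to show how
// the preceding/following row counts fall out of two binary searches per row.
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
  // Sorted (ASCENDING) timestamps for a single group, in arbitrary units.
  std::vector<long> ts{1, 2, 2, 5, 9, 10};
  long preceding_window = 3;  // count rows up to and including idx with ts >= ts[idx] - 3
  long following_window = 1;  // count rows after idx with ts <= ts[idx] + 1

  for (int idx = 0; idx < static_cast<int>(ts.size()); ++idx) {
    long lowest  = ts[idx] - preceding_window;
    long highest = ts[idx] + following_window;

    // Same expressions as the preceding/following calculator lambdas above.
    auto preceding = (ts.begin() + idx) -
                     std::lower_bound(ts.begin(), ts.begin() + idx, lowest) + 1;
    auto following = std::upper_bound(ts.begin() + idx, ts.end(), highest) -
                     (ts.begin() + idx) - 1;

    std::printf("row %d (ts=%ld): preceding=%ld following=%ld\n",
                idx, ts[idx], (long)preceding, (long)following);
  }
  return 0;
}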
3e7318c61d73cd8fcb1c07bab0acfaca3ca2cc58.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims, double upperThreshold, double deviceID)
{
  // a variable to keep track of this simulation's position in the crossTimes array
  int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;

  if (crossTimeIndex < numSims) {
    // create random number generator
    hiprandState_t state;
    hiprand_init(blockIdx.x * (1000 * deviceID) + threadIdx.x + clock64(), 0, 0, &state);
    double random;

    // starting position of this simulation in the results array
    int start = (threadIdx.x + blockIdx.x * blockDim.x) * N;

    // set default value of cross time for this simulation to 0, since the simulation hasn't crossed the threshold yet
    crossTimes[crossTimeIndex] = 0;

    // starting point of path is 0
    results[start] = 0.0;

    // boolean to keep track of whether this path has crossed
    bool crossed = false;

    for (int j = start + 1; j < start + N; j++) {
      // generate random number
      random = hiprand_normal_double(&state);

      // calculate next step of path
      results[j] = results[j - 1] + random * sqrt((double)T / N);

      // record the step index of the first crossing of the upper threshold (0 means the path never crossed)
      if (!crossed && results[j] >= upperThreshold) {
        crossTimes[crossTimeIndex] = j - start;
        crossed = true;
      }
    }
  }
}
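// A hedged host-side reference for the kernel above, using the same step size sqrt(T/N) and the
// same "first step index at which the path reaches upperThreshold, 0 if never" convention.
// The function name, RNG choice, and seeding scheme below are illustrative assumptions, not part
// of the original file; this is only a sketch one could use to sanity-check the device results.
#include <cmath>
#include <cstdio>
#include <random>

// Simulate one path of N steps over horizon T and return the first step index
// that reaches `upperThreshold`, or 0 if the path never crosses.
static int first_crossing_host(int T, int N, double upperThreshold, unsigned seed)
{
  std::mt19937_64 gen(seed);
  std::normal_distribution<double> normal(0.0, 1.0);

  double step = std::sqrt(static_cast<double>(T) / N);
  double x = 0.0;                        // path starts at 0, as in the kernel
  for (int j = 1; j < N; ++j) {
    x += normal(gen) * step;
    if (x >= upperThreshold) return j;   // kernel stores j - start, i.e. the step index
  }
  return 0;
}

int main()
{
  int crossed = 0, sims = 1000;
  for (int s = 0; s < sims; ++s)
    crossed += (first_crossing_host(/*T=*/1, /*N=*/1000, /*upperThreshold=*/1.0, 1234u + s) != 0);
  std::printf("%d of %d host paths crossed the threshold\n", crossed, sims);
  return 0;
}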
3e7318c61d73cd8fcb1c07bab0acfaca3ca2cc58.cu
#include "includes.h" __global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims, double upperThreshold, double deviceID) { // a variable to keep track of this simulation's position in the crossTimes array int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x; if (crossTimeIndex < numSims) { // create random number generator curandState_t state; curand_init (blockIdx.x * (1000 * deviceID) + threadIdx.x + clock64(), 0, 0, &state); double random; // starting position of this siulation in results array int start = (threadIdx.x + blockIdx.x * blockDim.x) * N; // set default value of cross time for this simulation to 0, since the simulation hasn't crossed the threshold yet crossTimes[crossTimeIndex] = 0; // starting point of path is 0 results[start] = 0.0; // boolean to keep track of whether this path has crossed bool crossed = false; for (int j = start + 1; j < start + N; j++) { // generate random number random = curand_normal_double(&state); //calculate next step of path results[j] = results[j-1] + random * sqrt((double) T / N); // store crossing time as positive value if it has crossed the upper threshold. Negative value if crossed the lower threshold if (!crossed && results[j] >= upperThreshold) { crossTimes[crossTimeIndex] = j - start; crossed = true; } } } }
57023ba297949187230ca5ce4c2b461934e94184.hip
// !!! This is a file automatically generated by hipify!!! #include "testfuncs.cuh" __host__ void test_updateSimsAndPropensities() { const int s = 3; const int n = 3; const int m = 3; const int max_reactants = 2; const int max_affected = 3; double propensities[s * m] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; double reaction_rates[n] = { 1, 2, 0.5 }; // 2A -> B, 1 // A 2B -> C, 2 // C -> 2C, 0.5 int reactants[n * max_reactants * 2] = { 0, 2, 0, 0, 0, 1, 1, 2, 2, 1, 0, 0 }; int state_changes[m * n] = { -2, 1, 0, -1, -2, 1, 0, 0, 1 }; int dep_matrix[n * max_affected] = { 0, 1, -1, 0, 1, 2, 2, -1, -1 }; int sim_configs[m * s] = { 5, 5, 5, 5, 5, 5, 5, 5, 5 }; int fired_reactions[s] = { 0, 1, 2 }; bool stability_flags[s] = { false, false, false }; double* dev_props = 0; double* dev_rates = 0; int* dev_reactants = 0; int* dev_state_changes = 0; int* dev_dep_matrix = 0; int* dev_fired_reactions = 0; int* dev_sim_configs = 0; bool* dev_stability_flags = 0; hipMalloc(&dev_props, s * m * sizeof(double)); hipMalloc(&dev_rates, m * sizeof(double)); hipMalloc(&dev_reactants, n * max_reactants * 2 * sizeof(int)); hipMalloc(&dev_state_changes, m * n * sizeof(int)); hipMalloc(&dev_dep_matrix, n * max_affected * sizeof(int)); hipMalloc(&dev_sim_configs, m * s * sizeof(int)); hipMalloc(&dev_fired_reactions, s * sizeof(int)); hipMalloc(&dev_stability_flags, s * sizeof(bool)); hipMemcpy(dev_props, propensities, s * m * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_rates, reaction_rates, m * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(dev_reactants, reactants, n * max_reactants * 2 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_state_changes, state_changes, m * n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_dep_matrix, dep_matrix, n * max_affected * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_sim_configs, sim_configs, m * s * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_fired_reactions, fired_reactions, s * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_stability_flags, stability_flags, s * sizeof(bool), hipMemcpyHostToDevice); dim3 grid(1, 1); dim3 threads(16, 16); //updateSims(s, n, dev_fired_reactions, dev_sim_configs, dev_state_changes, dev_stability_flags); //updateProps(s, n, m, max_reactants, dev_sim_configs, dev_reactants, dev_rates, dev_props); hipMemcpy(propensities, dev_props, s * m * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(reaction_rates, dev_rates, m * sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(reactants, dev_reactants, n * max_reactants * 2 * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(state_changes, dev_state_changes, m * n * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(dep_matrix, dev_dep_matrix, n * max_affected * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(sim_configs, dev_sim_configs, m * s * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(fired_reactions, dev_fired_reactions, s * sizeof(int), hipMemcpyDeviceToHost); std::cout << "Simulation Configs" << '\n'; for (int i = 0; i < s * n; i++) { std::cout << sim_configs[i] << " "; } std::cout << "\n\n"; std::cout << "Propensities" << '\n'; for (int i = 0; i < s * m; i++) { std::cout << propensities[i] << " "; } } __host__ bool test_scaleRVs(int s, int m, bool verbose) { thrust::host_vector<double> propensities(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { propensities[i * m + j] = j + 1; } } // Construct keys for row-wise inclusive scan thrust::host_vector<int> keys(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { keys[i * m + j] = i; // key for 
each element in a row is just its row number } } thrust::host_vector<double> propscan(s * m); thrust::inclusive_scan_by_key(thrust::host, keys.begin(), keys.end(), propensities.begin(), propscan.begin()); std::default_random_engine generator; std::uniform_real_distribution<double> distribution(0.0, 1.0); thrust::host_vector<double> uniformRVs(s); for (int i = 0; i < s; i++) { uniformRVs[i] = distribution(generator); } thrust::host_vector<double> scaledRVs(s); for (int i = 0; i < s; i++) { scaledRVs[i] = uniformRVs[i] * propscan[m + i * m - 1]; } thrust::device_vector<double> dev_propscan = propscan; thrust::device_vector<double> dev_uniformRVs = uniformRVs; //scaleRVs(thrust::raw_pointer_cast(dev_uniformRVs.data()), thrust::raw_pointer_cast(dev_propscan.data()), s, m); thrust::host_vector<double> par_scaledRVs = dev_uniformRVs; int fail_index = -1; for (int i = 0; i < s; i++) { if (par_scaledRVs[i] != scaledRVs[i]) { fail_index = i; break; } } if (fail_index != -1) { std::cout << "Failure for s = " << s << " and m = " << m << '\n'; std::cout << "Fails first at index i = " << fail_index << "\n\n"; if (verbose) { std::cout << "Random Variables" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << uniformRVs[i] << ", "; } std::cout << "}\n\n"; std::cout << "Propensity Sums" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << propscan[m + i * m - 1] << ", "; } std::cout << "}\n\n"; std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << scaledRVs[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_scaledRVs[i] << ", "; } std::cout << "}\n\n"; } return false; } else { std::cout << "Success for s = " << s << " and m = " << m << "\n\n"; if (verbose) { std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << scaledRVs[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_scaledRVs[i] << ", "; } std::cout << "}\n\n"; } return true; } } __host__ bool test_checkBins(int s, int m, bool verbose) { thrust::host_vector<double> propensities(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { propensities[i * m + j] = j + 1; } } // Construct keys for row-wise inclusive scan thrust::host_vector<int> keys(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { keys[i * m + j] = i; // key for each element in a row is just its row number } } thrust::host_vector<double> propscan(s * m); thrust::inclusive_scan_by_key(thrust::host, keys.begin(), keys.end(), propensities.begin(), propscan.begin()); std::default_random_engine generator; std::uniform_real_distribution<double> distribution(0.0, 1.0); thrust::host_vector<double> uniformRVs(s); for (int i = 0; i < s; i++) { uniformRVs[i] = distribution(generator); } thrust::device_vector<double> dev_propscan = propscan; thrust::device_vector<double> dev_uniformRVs = uniformRVs; //scaleRVs(thrust::raw_pointer_cast(dev_uniformRVs.data()), thrust::raw_pointer_cast(dev_propscan.data()), s, m); // move scaled RVs back to host uniformRVs = dev_uniformRVs; thrust::host_vector<int> bins(s); thrust::device_vector<int> dev_bins(s); thrust::fill(dev_bins.begin(), dev_bins.end(), -1); // Compute bins on host for (int i = 0; i < s; i++) { double rv = uniformRVs[i]; for (int j = 0; j < m; j++) { double left_edge, right_edge; if (j == 0) { left_edge = 0; right_edge = 
propscan[i * m + j]; } else { left_edge = propscan[i * m + j - 1]; right_edge = propscan[i * m + j]; } // Push right edge over to include total for last element if (j == m - 1) { right_edge += 1; } if (left_edge <= rv && rv < right_edge) { bins[i] = j; }; } } //checkBins(thrust::raw_pointer_cast(dev_propscan.data()), thrust::raw_pointer_cast(dev_uniformRVs.data()), thrust::raw_pointer_cast(dev_bins.data()), s, m); thrust::host_vector<int> par_bins = dev_bins; int fail_index = -1; for (int i = 0; i < s; i++) { if (par_bins[i] != bins[i]) { fail_index = i; break; } } if (fail_index != -1) { std::cout << "Failure for s = " << s << " and m = " << m << '\n'; std::cout << "Fails first at index i = " << fail_index << "\n\n"; if (verbose) { std::cout << "Random Variables" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << uniformRVs[i] << ", "; } std::cout << "}\n\n\n"; std::cout << "Propensity Sums" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << propscan[m + i * m - 1] << ", "; } std::cout << "}\n\n"; std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << bins[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_bins[i] << ", "; } std::cout << "}\n\n"; } return false; } else { std::cout << "Success for s = " << s << " and m = " << m << "\n\n"; if (verbose) { std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << bins[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_bins[i] << ", "; } std::cout << "}\n\n"; } return true; } }
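A minimal, editor-added sketch of the scaleRVs helper exercised above; it is not part of the dataset files. The real scaleRVs is declared in testfuncs.cuh and only appears here as a commented-out call, so the kernel name, signature, and launch configuration below are assumptions; the behaviour simply mirrors the host reference loop, which multiplies each simulation's uniform random variable by the last entry of its row in the row-wise inclusive scan (that simulation's total propensity).

// Hypothetical sketch -- not the project's actual scaleRVs implementation.
__global__ void scaleRVs_kernel_sketch(double* uniformRVs, const double* propscan, int s, int m)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < s) {
        // propscan[(i + 1) * m - 1] is the inclusive-scan total of row i,
        // the same element the host loop reads as propscan[m + i * m - 1].
        uniformRVs[i] *= propscan[(i + 1) * m - 1];
    }
}

// A wrapper matching the commented-out call scaleRVs(rv_ptr, scan_ptr, s, m) might launch it as:
void scaleRVs_sketch(double* dev_uniformRVs, double* dev_propscan, int s, int m)
{
    const int threads = 256;
    const int blocks = (s + threads - 1) / threads;
    scaleRVs_kernel_sketch<<<blocks, threads>>>(dev_uniformRVs, dev_propscan, s, m);
}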
57023ba297949187230ca5ce4c2b461934e94184.cu
#include "testfuncs.cuh" __host__ void test_updateSimsAndPropensities() { const int s = 3; const int n = 3; const int m = 3; const int max_reactants = 2; const int max_affected = 3; double propensities[s * m] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; double reaction_rates[n] = { 1, 2, 0.5 }; // 2A -> B, 1 // A 2B -> C, 2 // C -> 2C, 0.5 int reactants[n * max_reactants * 2] = { 0, 2, 0, 0, 0, 1, 1, 2, 2, 1, 0, 0 }; int state_changes[m * n] = { -2, 1, 0, -1, -2, 1, 0, 0, 1 }; int dep_matrix[n * max_affected] = { 0, 1, -1, 0, 1, 2, 2, -1, -1 }; int sim_configs[m * s] = { 5, 5, 5, 5, 5, 5, 5, 5, 5 }; int fired_reactions[s] = { 0, 1, 2 }; bool stability_flags[s] = { false, false, false }; double* dev_props = 0; double* dev_rates = 0; int* dev_reactants = 0; int* dev_state_changes = 0; int* dev_dep_matrix = 0; int* dev_fired_reactions = 0; int* dev_sim_configs = 0; bool* dev_stability_flags = 0; cudaMalloc(&dev_props, s * m * sizeof(double)); cudaMalloc(&dev_rates, m * sizeof(double)); cudaMalloc(&dev_reactants, n * max_reactants * 2 * sizeof(int)); cudaMalloc(&dev_state_changes, m * n * sizeof(int)); cudaMalloc(&dev_dep_matrix, n * max_affected * sizeof(int)); cudaMalloc(&dev_sim_configs, m * s * sizeof(int)); cudaMalloc(&dev_fired_reactions, s * sizeof(int)); cudaMalloc(&dev_stability_flags, s * sizeof(bool)); cudaMemcpy(dev_props, propensities, s * m * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_rates, reaction_rates, m * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_reactants, reactants, n * max_reactants * 2 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_state_changes, state_changes, m * n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_dep_matrix, dep_matrix, n * max_affected * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_sim_configs, sim_configs, m * s * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_fired_reactions, fired_reactions, s * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_stability_flags, stability_flags, s * sizeof(bool), cudaMemcpyHostToDevice); dim3 grid(1, 1); dim3 threads(16, 16); //updateSims(s, n, dev_fired_reactions, dev_sim_configs, dev_state_changes, dev_stability_flags); //updateProps(s, n, m, max_reactants, dev_sim_configs, dev_reactants, dev_rates, dev_props); cudaMemcpy(propensities, dev_props, s * m * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(reaction_rates, dev_rates, m * sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(reactants, dev_reactants, n * max_reactants * 2 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(state_changes, dev_state_changes, m * n * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(dep_matrix, dev_dep_matrix, n * max_affected * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(sim_configs, dev_sim_configs, m * s * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(fired_reactions, dev_fired_reactions, s * sizeof(int), cudaMemcpyDeviceToHost); std::cout << "Simulation Configs" << '\n'; for (int i = 0; i < s * n; i++) { std::cout << sim_configs[i] << " "; } std::cout << "\n\n"; std::cout << "Propensities" << '\n'; for (int i = 0; i < s * m; i++) { std::cout << propensities[i] << " "; } } __host__ bool test_scaleRVs(int s, int m, bool verbose) { thrust::host_vector<double> propensities(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { propensities[i * m + j] = j + 1; } } // Construct keys for row-wise inclusive scan thrust::host_vector<int> keys(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { keys[i * m + j] = i; // key for each element in a row 
is just its row number } } thrust::host_vector<double> propscan(s * m); thrust::inclusive_scan_by_key(thrust::host, keys.begin(), keys.end(), propensities.begin(), propscan.begin()); std::default_random_engine generator; std::uniform_real_distribution<double> distribution(0.0, 1.0); thrust::host_vector<double> uniformRVs(s); for (int i = 0; i < s; i++) { uniformRVs[i] = distribution(generator); } thrust::host_vector<double> scaledRVs(s); for (int i = 0; i < s; i++) { scaledRVs[i] = uniformRVs[i] * propscan[m + i * m - 1]; } thrust::device_vector<double> dev_propscan = propscan; thrust::device_vector<double> dev_uniformRVs = uniformRVs; //scaleRVs(thrust::raw_pointer_cast(dev_uniformRVs.data()), thrust::raw_pointer_cast(dev_propscan.data()), s, m); thrust::host_vector<double> par_scaledRVs = dev_uniformRVs; int fail_index = -1; for (int i = 0; i < s; i++) { if (par_scaledRVs[i] != scaledRVs[i]) { fail_index = i; break; } } if (fail_index != -1) { std::cout << "Failure for s = " << s << " and m = " << m << '\n'; std::cout << "Fails first at index i = " << fail_index << "\n\n"; if (verbose) { std::cout << "Random Variables" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << uniformRVs[i] << ", "; } std::cout << "}\n\n"; std::cout << "Propensity Sums" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << propscan[m + i * m - 1] << ", "; } std::cout << "}\n\n"; std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << scaledRVs[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_scaledRVs[i] << ", "; } std::cout << "}\n\n"; } return false; } else { std::cout << "Success for s = " << s << " and m = " << m << "\n\n"; if (verbose) { std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << scaledRVs[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_scaledRVs[i] << ", "; } std::cout << "}\n\n"; } return true; } } __host__ bool test_checkBins(int s, int m, bool verbose) { thrust::host_vector<double> propensities(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { propensities[i * m + j] = j + 1; } } // Construct keys for row-wise inclusive scan thrust::host_vector<int> keys(s * m); for (int i = 0; i < s; i++) { for (int j = 0; j < m; j++) { keys[i * m + j] = i; // key for each element in a row is just its row number } } thrust::host_vector<double> propscan(s * m); thrust::inclusive_scan_by_key(thrust::host, keys.begin(), keys.end(), propensities.begin(), propscan.begin()); std::default_random_engine generator; std::uniform_real_distribution<double> distribution(0.0, 1.0); thrust::host_vector<double> uniformRVs(s); for (int i = 0; i < s; i++) { uniformRVs[i] = distribution(generator); } thrust::device_vector<double> dev_propscan = propscan; thrust::device_vector<double> dev_uniformRVs = uniformRVs; //scaleRVs(thrust::raw_pointer_cast(dev_uniformRVs.data()), thrust::raw_pointer_cast(dev_propscan.data()), s, m); // move scaled RVs back to host uniformRVs = dev_uniformRVs; thrust::host_vector<int> bins(s); thrust::device_vector<int> dev_bins(s); thrust::fill(dev_bins.begin(), dev_bins.end(), -1); // Compute bins on host for (int i = 0; i < s; i++) { double rv = uniformRVs[i]; for (int j = 0; j < m; j++) { double left_edge, right_edge; if (j == 0) { left_edge = 0; right_edge = propscan[i * m + j]; } else 
{ left_edge = propscan[i * m + j - 1]; right_edge = propscan[i * m + j]; } // Push right edge over to include total for last element if (j == m - 1) { right_edge += 1; } if (left_edge <= rv && rv < right_edge) { bins[i] = j; }; } } //checkBins(thrust::raw_pointer_cast(dev_propscan.data()), thrust::raw_pointer_cast(dev_uniformRVs.data()), thrust::raw_pointer_cast(dev_bins.data()), s, m); thrust::host_vector<int> par_bins = dev_bins; int fail_index = -1; for (int i = 0; i < s; i++) { if (par_bins[i] != bins[i]) { fail_index = i; break; } } if (fail_index != -1) { std::cout << "Failure for s = " << s << " and m = " << m << '\n'; std::cout << "Fails first at index i = " << fail_index << "\n\n"; if (verbose) { std::cout << "Random Variables" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << uniformRVs[i] << ", "; } std::cout << "}\n\n\n"; std::cout << "Propensity Sums" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << propscan[m + i * m - 1] << ", "; } std::cout << "}\n\n"; std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << bins[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_bins[i] << ", "; } std::cout << "}\n\n"; } return false; } else { std::cout << "Success for s = " << s << " and m = " << m << "\n\n"; if (verbose) { std::cout << "Host Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << bins[i] << ", "; } std::cout << "}\n\n"; std::cout << "Device Result" << '\n'; std::cout << "{"; for (int i = 0; i < s; i++) { std::cout << par_bins[i] << ", "; } std::cout << "}\n\n"; } return true; } }
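A minimal, editor-added sketch of the checkBins helper tested above; it is not part of the dataset files. checkBins itself is only visible here as a commented-out call, so the argument order is taken from that call and the name and launch configuration are assumptions; the bin-selection logic mirrors the host reference loop, including the +1 push on the last bin's right edge so the row total falls inside it.

// Hypothetical sketch -- not the project's actual checkBins implementation.
__global__ void checkBins_kernel_sketch(const double* propscan, const double* uniformRVs,
                                        int* bins, int s, int m)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= s) return;

    double rv = uniformRVs[i];
    for (int j = 0; j < m; j++) {
        double left_edge  = (j == 0) ? 0.0 : propscan[i * m + j - 1];
        double right_edge = propscan[i * m + j];
        if (j == m - 1) right_edge += 1.0;   // include the row total in the last bin
        if (left_edge <= rv && rv < right_edge) {
            bins[i] = j;   // one thread per simulation writes its selected reaction index
            return;
        }
    }
}

// A launch along the lines of the commented-out call might look like:
//   int threads = 256, blocks = (s + threads - 1) / threads;
//   checkBins_kernel_sketch<<<blocks, threads>>>(dev_propscan, dev_uniformRVs, dev_bins, s, m);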
6c114a8c901db6ea6b9cfd0c1d0b11151f0381e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SpectRE - A Spectral Code for Reheating * Copyright (C) 2009-2010 Hal Finkel, Nathaniel Roth and Richard Easther * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "reduction_helper.hpp" #include "v_integrator.hpp" #include <iostream> #include <fstream> #include <iomanip> using namespace std; /** * Returns the value of the field potential at a point given the values of the fields at that point. * The field values are sent in program units, and the potential is returned in program units. * This is equation 6.5 from the LatticeEasy manual. */ __global__ void v_integrator_kernel(double *phi, IF_CHI_ARG(double *chi,) double *total_V, double a_t) { int x = blockIdx.x; int y = blockIdx.y; int z = threadIdx.x; int coeffx = 2 + 2 * (x & 0x1); int coeffy = 2 + 2 * (y & 0x1); int coeffz = 2 + 2 * (z & 0x1); int ldl = 2*(NGRIDSIZE/2+1); int idx = z + ldl*(y + NGRIDSIZE*x); int idx_V = z + NGRIDSIZE*(y + NGRIDSIZE*x); total_V[idx_V] = coeffx * coeffy * coeffz * model_params::V(phi[idx], IF_CHI_ARG(chi[idx],) a_t); } // Integrate the potential. Returns the average value. template <typename R> R v_integrator<R>::integrate(field<R> &phi, IF_CHI_ARG(field<R> &chi,) R a_t) { phi.switch_state(position); IF_CHI(chi.switch_state(position)); auto total_V_arr = double_array_gpu(NGRIDSIZE, NGRIDSIZE, NGRIDSIZE); dim3 nr_blocks(NGRIDSIZE, NGRIDSIZE); dim3 nr_threads(NGRIDSIZE, 1); hipLaunchKernelGGL(( v_integrator_kernel), dim3(nr_blocks), dim3(nr_threads), 0, 0, phi.data.ptr, IF_CHI_ARG(chi.data.ptr,) total_V_arr.ptr(), a_t); double total_V = total_V_arr.sum(); return total_V / (3.0 * 3 * 3 * NTOTAL_GRIDPOINTS); } // Explicit instantiations template class v_integrator<double>;
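A short, editor-added worked check of the weighting used in v_integrator_kernel above; it is not part of the dataset files. Assuming the lattice is periodic and NGRIDSIZE is even (not stated in this file, but the usual setup for this kind of lattice code), the per-axis factor coeff = 2 + 2*(i & 1) reproduces the composite Simpson weight pattern {2, 4, 2, 4, ...} that arises when the two endpoint weights of 1 merge across the periodic boundary, and dividing the three-axis product by 3*3*3 and by NTOTAL_GRIDPOINTS turns the weighted sum into an average, which is what the return statement computes.

// Standalone host-side check (hypothetical, illustration only): the normalised
// per-axis weights (2 or 4, each divided by 3) average to exactly 1, so their sum
// over an even number of points n is n.
#include <cstdio>

int main()
{
    const int n = 8;                        // any even points-per-axis value (assumption)
    double sum_w = 0.0;
    for (int i = 0; i < n; ++i) {
        const int coeff = 2 + 2 * (i & 1);  // 2, 4, 2, 4, ... as in the kernel
        sum_w += coeff / 3.0;               // per-axis Simpson normalisation
    }
    std::printf("sum of normalised weights = %.1f (expected %d)\n", sum_w, n);
    return 0;
}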
6c114a8c901db6ea6b9cfd0c1d0b11151f0381e1.cu
/* * SpectRE - A Spectral Code for Reheating * Copyright (C) 2009-2010 Hal Finkel, Nathaniel Roth and Richard Easther * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "reduction_helper.hpp" #include "v_integrator.hpp" #include <iostream> #include <fstream> #include <iomanip> using namespace std; /** * Returns the value of the field potential at a point given the values of the fields at that point. * The field values are sent in program units, and the potential is returned in program units. * This is equation 6.5 from the LatticeEasy manual. */ __global__ void v_integrator_kernel(double *phi, IF_CHI_ARG(double *chi,) double *total_V, double a_t) { int x = blockIdx.x; int y = blockIdx.y; int z = threadIdx.x; int coeffx = 2 + 2 * (x & 0x1); int coeffy = 2 + 2 * (y & 0x1); int coeffz = 2 + 2 * (z & 0x1); int ldl = 2*(NGRIDSIZE/2+1); int idx = z + ldl*(y + NGRIDSIZE*x); int idx_V = z + NGRIDSIZE*(y + NGRIDSIZE*x); total_V[idx_V] = coeffx * coeffy * coeffz * model_params::V(phi[idx], IF_CHI_ARG(chi[idx],) a_t); } // Integrate the potential. Returns the average value. template <typename R> R v_integrator<R>::integrate(field<R> &phi, IF_CHI_ARG(field<R> &chi,) R a_t) { phi.switch_state(position); IF_CHI(chi.switch_state(position)); auto total_V_arr = double_array_gpu(NGRIDSIZE, NGRIDSIZE, NGRIDSIZE); dim3 nr_blocks(NGRIDSIZE, NGRIDSIZE); dim3 nr_threads(NGRIDSIZE, 1); v_integrator_kernel<<<nr_blocks, nr_threads>>>(phi.data.ptr, IF_CHI_ARG(chi.data.ptr,) total_V_arr.ptr(), a_t); double total_V = total_V_arr.sum(); return total_V / (3.0 * 3 * 3 * NTOTAL_GRIDPOINTS); } // Explicit instantiations template class v_integrator<double>;
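An editor-added sketch, not part of the dataset files: double_array_gpu and its sum() come from reduction_helper.hpp, which is not included in this row, so the function below is only one plausible way such a device reduction could be written; the ptr() accessor and the element count are assumptions taken from how the integrator uses the helper.

// Hypothetical sketch -- not the project's reduction_helper.hpp implementation.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>

// Sums `count` doubles that live in device memory and returns the result on the host.
double sum_device_array_sketch(double* dev_data, size_t count)
{
    thrust::device_ptr<double> p = thrust::device_pointer_cast(dev_data);
    return thrust::reduce(p, p + count, 0.0, thrust::plus<double>());
}

// Used in the same spirit as total_V_arr.sum() above, e.g.:
//   double total_V = sum_device_array_sketch(total_V_arr.ptr(),
//                                            (size_t)NGRIDSIZE * NGRIDSIZE * NGRIDSIZE);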
91360ef1087533f8802e5bec5dd4174942a78998.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star2d2r-512-8-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_7_3; float __reg_7_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + 
h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_7_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, 
__reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, 
__reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, 
__reg_6_0, __reg_6_1); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, 
__reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, 
__reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, 
__reg_6_3); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); 
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, 
__reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); 
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, 
__reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, 
__reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); 
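/* In the steady-state loops the __STORE row index trails the load index __h by 16 rows, which matches the pipeline depth: with __halo1 = 2 and eight fused stencil applications (__CALC1..__CALC7 plus the final __STORE), an output row seems to become ready only 8 * 2 = 16 loaded rows later. */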
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, 
__reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, 
__reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0); __STORE(__h + 1, __reg_7_2, 
__reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, 
__reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const 
AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_6_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); 
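/* Each __CALCn stage applies the radius-2 star stencil (__CALCEXPR) only where __writeValidn holds, i.e. for threads at least n * __halo2 columns inside the tile; other lanes simply forward the centre value (out = reg2) to the next stage. __LOAD and __DEST likewise appear to ping-pong between the (__c0 % 2) and ((c0 + 1) % 2) planes of A, so each fused group of time steps reads one copy of the grid and writes the other. */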
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, 
__reg_5_2, __reg_5_3, __reg_5_4); __STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, 
__reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, 
__reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, 
__reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, 
__reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); 
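/* Within this remainder branch every row of the tile has already been loaded, so the
   groups that follow only advance the deeper pipeline stages (__CALC2..__CALC6) and
   __STORE the remaining output rows up to the tile edge. */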
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); } } else { for (__h = 29; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); 
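/* Interior c1 tiles: this unrolled steady-state loop loads one new row per iteration,
   streams it through __CALC1..__CALC6 in registers (with the c2 neighbours read from
   the shared-memory buffer), and __STORE applies the last fused step 14 rows behind
   the load front; __DB_SWITCH() flips the shared-memory double buffer after the five
   unrolled iterations. */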
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + 
(0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_5_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); 
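/* First c1 tile of this 6-step kernel: the prologue primes the register pipeline.
   The two halo rows go into __reg_5_0/__reg_5_1, each subsequently loaded row enters
   one more __CALC stage, and once all five stages are active __STORE starts emitting
   fully updated rows from row 2 onward. */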
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, 
__reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, 
__reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, 
__reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); 
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); 
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, 
__reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, __h + 3); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, 
__reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); } } else { for (__h = 25; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); 
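/* Interior c1 tiles of the 6-step kernel: the steady-state loop mirrors the wider
   variant above, with five __CALC stages plus the final update inside __STORE, so
   each finished row lands 12 rows behind the load front (6 fused steps x halo 2). */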
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 
= 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_4_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
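// (top tile, __c1Id == 0: rows are loaded one at a time to prime the 5-stage register pipeline; once every stage holds valid data, __STORE begins emitting output rows, trailing the load front by __halo1 * __side0Len = 10 rows)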
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); 
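// (non-top tiles re-prime the pipeline from their own halo rows and write nothing until it is full: the first __STORE targets row 10, issued only after row 20 has been loaded)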
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, 
__reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, 
__reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, __h + 3); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, 
__reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_3_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0); __STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, 
__reg_3_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, __h + 3); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * 
__side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; 
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
91360ef1087533f8802e5bec5dd4174942a78998.cu
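// Descriptive header (inferred from the kernel bodies and the "star2d2r-512-8-256" include name;
// the AN5D_TYPE macro suggests these are AN5D-generated, temporally fused stencil kernels):
// each kernel0_N variant appears to fuse N time steps per sweep (__side0Len = N). For every fused
// time level t, the 5-point window along c1 is carried in registers __reg_t_0..__reg_t_4, while the
// double-buffered shared array __c_sb_double supplies the +/-2 neighbors along c2 (a radius-2,
// 9-point star stencil per __CALCEXPR). __loadValid, __writeValidK, and __storeValid mask threads
// whose halo falls outside the tile, and __STORE writes the last fused level into the ping-pong
// copy of A selected by (c0 + 1) % 2.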
#include "star2d2r-512-8-256_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_7_3; float __reg_7_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST 
(A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_7_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, 
__reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); 
__LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); 
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, 
__reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, 
__reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 29); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 30); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 31); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 32); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, 
__reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 
11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, 
__reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, 
__reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, 
__reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, 
__reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); 
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0); __STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1); } } else { for (__h = 33; __h <= __side1LenOl - 5;) { 
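/* Non-boundary blocks along c1: steady-state streaming loop.  The body is
   unrolled 5x to match the five-register rotation of every pipeline stage
   (__reg_k_0..__reg_k_4 -- a radius-2 stencil keeps five rows in flight per
   stage).  Each iteration loads one new row, pushes it through the chained
   stages __CALC1..__CALC7, and commits the finished row 16 rows behind the
   load front with __STORE(__h - 16, ...); the trailing
   `if (__h == __side1LenOl) return;` ladder below drains the remainder. */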
__LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, 
__reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_6_3; float __reg_6_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_6_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); 
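/* kernel0_7, top-boundary block (__c1Id == 0): the register pipeline is still
   being primed at this point.  Rows 0..16 are loaded and fed through
   __CALC1..__CALC6 before the first fused result is committed by
   __STORE(2, ...) a few statements further on. */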
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, 
__reg_5_2, __reg_5_3, __reg_5_4); __STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 25); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, 
__reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 26); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 27); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 28); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, 
__reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, 
__reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, 
__reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); 
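/* Pipeline drain for the last tile along c1: each `__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2`
   branch loads only the k rows that actually remain, then finishes the outstanding __CALCn/__STORE
   stages with values already held in registers, substituting the level-0 halo registers (__reg_0_x)
   where no deeper intermediate row exists. */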
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, 
__reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1); __STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2); } } else { for (__h = 29; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); 
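/* Steady-state streaming loop for interior tiles along c1: each unrolled body consumes five input
   rows (one per phase of the 5-way register rotation) and emits five output rows 14 rows behind the
   load front, then swaps the shared-memory row buffer (__DB_SWITCH) and synchronizes. The guarded
   single-row steps after the loop handle the remainder when __side1LenOl is not a multiple of five. */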
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_5_3; float __reg_5_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + 
(0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_5_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); 
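/* kernel0_6 fuses __side0Len = 6 time steps per sweep: __CALC1..__CALC5 hold the five intermediate
   time levels in rotating registers and __STORE applies the sixth, writing to the (c0 + 1) % 2 plane
   of A. __CALCEXPR is a radius-2, 9-point star stencil: rows c1-2..c1+2 come from the register
   pipeline, columns c2-2..c2+2 of the center row from the shared-memory buffer __c_sb. This prologue
   primes the pipeline row by row; stores begin once all six stages are live (__STORE(2, ...) when
   __c1Id == 0, __STORE(12, ...) otherwise). */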
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, 
__reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 21); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 22); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 23); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, 
__reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 24); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, 
__reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); 
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); 
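/* Note (added comment, inferred from the register-rotation pattern, not part of the generated output): the surrounding "__h + k == __c1Len - ..." branches handle the bottom rows of a tile -- the few remaining input rows (if any) are loaded, pushed through the outstanding __CALC stages, and the partially filled register pipeline is flushed so the trailing output rows still get stored. */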
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, 
__reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __LOAD(__reg_0_2, __h + 2); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_3, __h + 3); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, 
__reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); __STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3); } } else { for (__h = 25; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); 
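/* Note (added comment, inferred from the code, not part of the generated output): this is the steady-state streaming loop for tiles that are not the last along c1 -- each unrolled step loads one new row, advances it through the five __CALC stages plus the fused __STORE (six fused time steps), and writes a finished row 12 (= 2 * 6 halo) rows behind the load front. */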
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 
= 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_4_3; float __reg_4_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_4_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); 
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); 
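/* Note (added comment, inferred from the code, not part of the generated output): this else-branch prologue of kernel0_5 appears to prime the pipeline for tiles that do not start at the top boundary -- rows 0..20 of the tile (halo included) are loaded and run through __CALC1..__CALC4, and the first output row is only written by __STORE(10, ...). */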
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 18); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 19); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 20); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, 
__reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, 
__reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __LOAD(__reg_0_3, __h + 2); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __LOAD(__reg_0_4, __h + 3); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3); __STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4); } } else { for (__h = 21; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, 
__reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_3_3; float __reg_3_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_3_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4, __reg_2_0); __STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __LOAD(__reg_0_3, 13); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 14); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, 
__reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, 
__reg_3_0); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, 
__reg_3_2); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __LOAD(__reg_0_3, __h + 1); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_4, __h + 2); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __LOAD(__reg_0_0, __h + 3); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); __STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4); __STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, 
__reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * 
__side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_2_3; float __reg_2_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; 
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_1_3; float __reg_1_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_0_3; float __reg_0_4; __shared__ float __c_sb_double[__blockSize * 2]; float *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == 
__side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
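The stencil kernels above (kernel0_3, kernel0_2, kernel0_1 and the tail of the wider variant preceding them) are machine-generated, temporally blocked code (note the AN5D_TYPE macros): each variant fuses several time steps per pass by streaming rows through registers and a double-buffered shared-memory line, with kernel0_3/0_2/0_1 applying 3, 2 and 1 fused steps respectively. The underlying update they all apply is the radius-2 star stencil spelled out in __CALCEXPR. As a readability aid only, here is a minimal host-side sketch of one such time step, written from the coefficients visible in the macros; the function name and the plain nested-loop form are illustrative assumptions, not part of the generated source.

// Reference form of the update encoded in __CALCEXPR: a 2D star stencil of
// radius 2, applied to the interior of a dimsize x dimsize grid (halo = 2).
static void star2d2r_step_host(const float* in, float* out, int dimsize)
{
    for (int i = 2; i < dimsize - 2; ++i) {
        for (int j = 2; j < dimsize - 2; ++j) {
            out[i * dimsize + j] =
                0.09371f * in[(i - 2) * dimsize + j]  +  // two rows up
                0.09374f * in[(i - 1) * dimsize + j]  +  // one row up
                0.09376f * in[i * dimsize + (j - 2)]  +  // two columns left
                0.09372f * in[i * dimsize + (j - 1)]  +  // one column left
                0.25001f * in[i * dimsize + j]        +  // center
                0.09377f * in[i * dimsize + (j + 1)]  +  // one column right
                0.09373f * in[i * dimsize + (j + 2)]  +  // two columns right
                0.09375f * in[(i + 1) * dimsize + j]  +  // one row down
                0.09378f * in[(i + 2) * dimsize + j];    // two rows down
        }
    }
}

In the generated kernels the same expression is evaluated with the column neighbours taken from the shared-memory line (__SBREF) and the row neighbours from the five registers passed to each __CALCn/__STORE invocation, with input and output planes selected by the (c0 % 2) double buffer inside A.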
f9f151ab2d6cbb8113d7ff144c01f6ef294b99cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This is problem 2, page ranking // The problem is to compute the rank of a set of webpages // given a link graph, aka a graph where each node is a webpage, // and each edge is a link from one page to another. // We're going to use the Pagerank algorithm (http://en.wikipedia.org/wiki/Pagerank), // specifically the iterative algorithm for calculating the rank of a page // We're going to run 6 iterations of the propage step. // Implement the corresponding code in CUDA. #include <cstdlib> #include <iostream> #include <iomanip> #include <cassert> #include <ctime> #include <limits> #include <vector> #include "util.cuh" #include "pagerank.cuh" event_pair timer; typedef unsigned int uint; // amount of floating point numbers between answer and computed value // for the answer to be taken correctly. 2's complement magick. constexpr int MAX_ULPS = 1000000; constexpr int NUM_ITERATIONS = 6; void host_graph_propagate( uint *graph_indices, uint *graph_edges, float *graph_nodes_in, float *graph_nodes_out, float *inv_edges_per_node, int num_nodes ) { // for each node for (int i = 0; i < num_nodes; i++) { float sum = 0.f; // for all of its edges for (uint j = graph_indices[i]; j < graph_indices[i + 1]; j++) { sum += graph_nodes_in[graph_edges[j]] * inv_edges_per_node[graph_edges[j]]; } graph_nodes_out[i] = 0.5f / (float)num_nodes + 0.5f * sum; } } double host_graph_iterate( uint *graph_indices, uint *graph_edges, float *graph_nodes_in, float *graph_nodes_out, float *inv_edges_per_node, int nr_iterations, int num_nodes ){ float *buffer_1 = new float[num_nodes]; float *buffer_2 = new float[num_nodes]; memcpy(buffer_1, graph_nodes_in, num_nodes * sizeof(float)); start_timer(&timer); for(int iter = 0; iter < nr_iterations / 2; iter++) { host_graph_propagate(graph_indices, graph_edges, buffer_1, buffer_2, inv_edges_per_node, num_nodes); host_graph_propagate(graph_indices, graph_edges, buffer_2, buffer_1, inv_edges_per_node, num_nodes); } // handle the odd case and copy memory to the output location if (nr_iterations % 2) { host_graph_propagate(graph_indices, graph_edges, buffer_1, buffer_2, inv_edges_per_node, num_nodes); memcpy(graph_nodes_out, buffer_2, num_nodes * sizeof(float)); } else { memcpy(graph_nodes_out, buffer_1, num_nodes * sizeof(float)); } double cpu_elapsed_time = stop_timer(&timer); delete[] buffer_1; delete[] buffer_2; return cpu_elapsed_time; } void generateGraph( int num_nodes, int avg_edges, std::vector<uint>& h_graph_indices, std::vector<uint>& h_graph_edges, std::vector<float>& h_inv_edges_per_node, std::vector<float>& h_node_values_input, std::vector<float>& h_gpu_node_values_output, std::vector<float>& h_cpu_node_values_output ){ h_graph_indices.resize(num_nodes + 1); h_node_values_input.resize(num_nodes); h_inv_edges_per_node.resize(num_nodes, 0.f); h_graph_edges.resize(num_nodes * avg_edges); h_gpu_node_values_output.resize(num_nodes); h_cpu_node_values_output.resize(num_nodes); h_graph_indices[0] = 0; int nodes_per_block = num_nodes / (avg_edges * 2 - 1) + 1; for (int i = 0; i < num_nodes; i++) { // each node has a deterministic number of edges that goes // 1, 1, 1, 1, ..., 2, 2, 2, 2, ..., 2 * avg_edges - 1 int nr_edges = i / nodes_per_block + 1; h_graph_indices[i+1] = h_graph_indices[i] + nr_edges; //assign a random node for each edge for (uint j = h_graph_indices[i]; j < h_graph_indices[i + 1]; j++) { // assign out node h_graph_edges[j] = rand() % num_nodes; // increment out degree 
h_inv_edges_per_node[h_graph_edges[j]]++; } h_node_values_input[i] = 1.f / (float) num_nodes; } // invert the out degree for (int i = 0; i < num_nodes; i++) { h_inv_edges_per_node[i] = 1.f / h_inv_edges_per_node[i]; } } void checkErrors( const std::vector<float>& h_gpu_node_values_output, const std::vector<float>& h_cpu_node_values_output ) { assert(h_gpu_node_values_output.size() == h_cpu_node_values_output.size()); int num_errors = 0; for (uint i = 0 ; i < h_gpu_node_values_output.size(); i++) { float n = h_gpu_node_values_output[i]; float c = h_cpu_node_values_output[i]; if (AlmostEqualUlps(n, c, MAX_ULPS)) continue; // error case // num_errors++; if (num_errors < 10) { std::cerr << "Mismatch at node " << i << std::endl; std::cerr << "Expected " << c << " and got " << n << std::endl; } else { std::cerr << "\nToo many errors, quitting" << std::endl; exit(1); } } if (num_errors) { std::cerr << "There were errors, quitting" << std::endl; exit(1); } } int main() { // initalize CUDA and warmup kernel to avoid including these costs in the timings hipFree(0); hipLaunchKernelGGL(( device_graph_propagate), dim3(1), dim3(1), 0, 0, nullptr, nullptr, nullptr, nullptr, nullptr, 0); std::vector<uint> num_nodes; std::vector<uint> avg_edges; num_nodes.push_back(1 << 15); for (uint i = 0; i < 5; ++i) num_nodes.push_back(num_nodes.back() * 2); for (uint i = 2; i < 20; ++i) avg_edges.push_back(i); // index array has to be n+1 so that the last thread can // still look at its neighbor for a stopping point std::vector<uint> h_graph_indices; std::vector<uint> h_graph_edges; std::vector<float> h_inv_edges_per_node; std::vector<float> h_node_values_input; std::vector<float> h_gpu_node_values_output; std::vector<float> h_cpu_node_values_output; // generate random input // initialize srand(time(nullptr)); std::cout << std::setw(60) << "Device Bandwidth GB/sec" << std::endl << std::endl; std::cout << std::setw(66) << "Number of nodes\n" << std::setw(15) << " "; for (const uint node : num_nodes) std::cout << std::setw(15) << node; std::cout << std::endl; std::cout << std::setw(16) << "Avg. no. edges\n"; for (const uint edge : avg_edges) { std::cout << std::setw(15) << edge; for (const uint node : num_nodes) { generateGraph(node, edge, h_graph_indices, h_graph_edges, h_inv_edges_per_node, h_node_values_input, h_gpu_node_values_output, h_cpu_node_values_output); // generate gpu output & timings double gpu_time = device_graph_iterate( h_graph_indices.data(), h_graph_edges.data(), h_node_values_input.data(), h_gpu_node_values_output.data(), h_inv_edges_per_node.data(), NUM_ITERATIONS, node, edge ); // generate reference output host_graph_iterate( h_graph_indices.data(), h_graph_edges.data(), h_node_values_input.data(), h_cpu_node_values_output.data(), h_inv_edges_per_node.data(), NUM_ITERATIONS, node ); // check CUDA output versus reference output checkErrors(h_gpu_node_values_output, h_cpu_node_values_output); // TODO: fill in the calculation for totalBytes uint totalBytes = (node*sizeof(float) + node*edge*(sizeof(uint)+2*sizeof(float)))*NUM_ITERATIONS; std::cout << std::setw(15) << std::fixed << std::setprecision(2) << totalBytes / (gpu_time / 1000.) / 1E9 << std::flush; } std::cout << std::endl; } return 0; }
f9f151ab2d6cbb8113d7ff144c01f6ef294b99cf.cu
// This is problem 2, page ranking // The problem is to compute the rank of a set of webpages // given a link graph, aka a graph where each node is a webpage, // and each edge is a link from one page to another. // We're going to use the Pagerank algorithm (http://en.wikipedia.org/wiki/Pagerank), // specifically the iterative algorithm for calculating the rank of a page // We're going to run 6 iterations of the propage step. // Implement the corresponding code in CUDA. #include <cstdlib> #include <iostream> #include <iomanip> #include <cassert> #include <ctime> #include <limits> #include <vector> #include "util.cuh" #include "pagerank.cuh" event_pair timer; typedef unsigned int uint; // amount of floating point numbers between answer and computed value // for the answer to be taken correctly. 2's complement magick. constexpr int MAX_ULPS = 1000000; constexpr int NUM_ITERATIONS = 6; void host_graph_propagate( uint *graph_indices, uint *graph_edges, float *graph_nodes_in, float *graph_nodes_out, float *inv_edges_per_node, int num_nodes ) { // for each node for (int i = 0; i < num_nodes; i++) { float sum = 0.f; // for all of its edges for (uint j = graph_indices[i]; j < graph_indices[i + 1]; j++) { sum += graph_nodes_in[graph_edges[j]] * inv_edges_per_node[graph_edges[j]]; } graph_nodes_out[i] = 0.5f / (float)num_nodes + 0.5f * sum; } } double host_graph_iterate( uint *graph_indices, uint *graph_edges, float *graph_nodes_in, float *graph_nodes_out, float *inv_edges_per_node, int nr_iterations, int num_nodes ){ float *buffer_1 = new float[num_nodes]; float *buffer_2 = new float[num_nodes]; memcpy(buffer_1, graph_nodes_in, num_nodes * sizeof(float)); start_timer(&timer); for(int iter = 0; iter < nr_iterations / 2; iter++) { host_graph_propagate(graph_indices, graph_edges, buffer_1, buffer_2, inv_edges_per_node, num_nodes); host_graph_propagate(graph_indices, graph_edges, buffer_2, buffer_1, inv_edges_per_node, num_nodes); } // handle the odd case and copy memory to the output location if (nr_iterations % 2) { host_graph_propagate(graph_indices, graph_edges, buffer_1, buffer_2, inv_edges_per_node, num_nodes); memcpy(graph_nodes_out, buffer_2, num_nodes * sizeof(float)); } else { memcpy(graph_nodes_out, buffer_1, num_nodes * sizeof(float)); } double cpu_elapsed_time = stop_timer(&timer); delete[] buffer_1; delete[] buffer_2; return cpu_elapsed_time; } void generateGraph( int num_nodes, int avg_edges, std::vector<uint>& h_graph_indices, std::vector<uint>& h_graph_edges, std::vector<float>& h_inv_edges_per_node, std::vector<float>& h_node_values_input, std::vector<float>& h_gpu_node_values_output, std::vector<float>& h_cpu_node_values_output ){ h_graph_indices.resize(num_nodes + 1); h_node_values_input.resize(num_nodes); h_inv_edges_per_node.resize(num_nodes, 0.f); h_graph_edges.resize(num_nodes * avg_edges); h_gpu_node_values_output.resize(num_nodes); h_cpu_node_values_output.resize(num_nodes); h_graph_indices[0] = 0; int nodes_per_block = num_nodes / (avg_edges * 2 - 1) + 1; for (int i = 0; i < num_nodes; i++) { // each node has a deterministic number of edges that goes // 1, 1, 1, 1, ..., 2, 2, 2, 2, ..., 2 * avg_edges - 1 int nr_edges = i / nodes_per_block + 1; h_graph_indices[i+1] = h_graph_indices[i] + nr_edges; //assign a random node for each edge for (uint j = h_graph_indices[i]; j < h_graph_indices[i + 1]; j++) { // assign out node h_graph_edges[j] = rand() % num_nodes; // increment out degree h_inv_edges_per_node[h_graph_edges[j]]++; } h_node_values_input[i] = 1.f / (float) num_nodes; } 
    // invert the out degree
    for (int i = 0; i < num_nodes; i++)
    {
        h_inv_edges_per_node[i] = 1.f / h_inv_edges_per_node[i];
    }
}

void checkErrors(
    const std::vector<float>& h_gpu_node_values_output,
    const std::vector<float>& h_cpu_node_values_output
)
{
    assert(h_gpu_node_values_output.size() == h_cpu_node_values_output.size());
    int num_errors = 0;
    for (uint i = 0; i < h_gpu_node_values_output.size(); i++)
    {
        float n = h_gpu_node_values_output[i];
        float c = h_cpu_node_values_output[i];
        if (AlmostEqualUlps(n, c, MAX_ULPS))
            continue;

        // error case
        num_errors++;
        if (num_errors < 10)
        {
            std::cerr << "Mismatch at node " << i << std::endl;
            std::cerr << "Expected " << c << " and got " << n << std::endl;
        }
        else
        {
            std::cerr << "\nToo many errors, quitting" << std::endl;
            exit(1);
        }
    }
    if (num_errors)
    {
        std::cerr << "There were errors, quitting" << std::endl;
        exit(1);
    }
}

int main()
{
    // initialize CUDA and warm up the kernel to avoid including these costs in the timings
    cudaFree(0);
    device_graph_propagate<<<1, 1>>>(nullptr, nullptr, nullptr, nullptr, nullptr, 0);

    std::vector<uint> num_nodes;
    std::vector<uint> avg_edges;
    num_nodes.push_back(1 << 15);
    for (uint i = 0; i < 5; ++i)
        num_nodes.push_back(num_nodes.back() * 2);
    for (uint i = 2; i < 20; ++i)
        avg_edges.push_back(i);

    // index array has to be n+1 so that the last thread can
    // still look at its neighbor for a stopping point
    std::vector<uint> h_graph_indices;
    std::vector<uint> h_graph_edges;
    std::vector<float> h_inv_edges_per_node;
    std::vector<float> h_node_values_input;
    std::vector<float> h_gpu_node_values_output;
    std::vector<float> h_cpu_node_values_output;

    // generate random input
    // initialize the random seed
    srand(time(nullptr));

    std::cout << std::setw(60) << "Device Bandwidth GB/sec" << std::endl << std::endl;
    std::cout << std::setw(66) << "Number of nodes\n" << std::setw(15) << " ";
    for (const uint node : num_nodes)
        std::cout << std::setw(15) << node;
    std::cout << std::endl;
    std::cout << std::setw(16) << "Avg. no. edges\n";

    for (const uint edge : avg_edges)
    {
        std::cout << std::setw(15) << edge;
        for (const uint node : num_nodes)
        {
            generateGraph(node, edge, h_graph_indices, h_graph_edges, h_inv_edges_per_node,
                          h_node_values_input, h_gpu_node_values_output, h_cpu_node_values_output);

            // generate gpu output & timings
            double gpu_time = device_graph_iterate(
                h_graph_indices.data(), h_graph_edges.data(),
                h_node_values_input.data(), h_gpu_node_values_output.data(),
                h_inv_edges_per_node.data(), NUM_ITERATIONS, node, edge
            );

            // generate reference output
            host_graph_iterate(
                h_graph_indices.data(), h_graph_edges.data(),
                h_node_values_input.data(), h_cpu_node_values_output.data(),
                h_inv_edges_per_node.data(), NUM_ITERATIONS, node
            );

            // check CUDA output versus reference output
            checkErrors(h_gpu_node_values_output, h_cpu_node_values_output);

            // TODO: fill in the calculation for totalBytes
            uint totalBytes = (node*sizeof(float) + node*edge*(sizeof(uint)+2*sizeof(float)))*NUM_ITERATIONS;
            std::cout << std::setw(15) << std::fixed << std::setprecision(2)
                      << totalBytes / (gpu_time / 1000.) / 1E9 << std::flush;
        }
        std::cout << std::endl;
    }
    return 0;
}
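// ----------------------------------------------------------------------------
// Note: device_graph_propagate / device_graph_iterate are declared in
// pagerank.cuh and implemented elsewhere. Purely as an illustration, a minimal
// sketch of one GPU propagation step -- assuming the same CSR layout and 0.5
// damping factor as host_graph_propagate, one thread per node, and no
// shared-memory tiling -- is given below. The kernel name and launch geometry
// are hypothetical, not the assignment's reference solution.
__global__ void device_graph_propagate_sketch(
    const uint *graph_indices, const uint *graph_edges,
    const float *graph_nodes_in, float *graph_nodes_out,
    const float *inv_edges_per_node, int num_nodes)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per node
    if (i >= num_nodes)
        return;

    float sum = 0.f;
    // walk this node's slice of the edge array and accumulate weighted ranks
    for (uint j = graph_indices[i]; j < graph_indices[i + 1]; j++)
        sum += graph_nodes_in[graph_edges[j]] * inv_edges_per_node[graph_edges[j]];

    graph_nodes_out[i] = 0.5f / (float)num_nodes + 0.5f * sum;
}
// A caller would launch roughly (num_nodes + 255) / 256 blocks of 256 threads
// and ping-pong between two device buffers across iterations, mirroring
// host_graph_iterate.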
50b81386888768a526e15fdea57e7014de2a4d25.hip
// !!! This is a file automatically generated by hipify!!! /* Bluebird Library - High performance CPUs and GPUs computing library. * * Copyright (C) 2012-2013 Orange Owl Solutions. * * This file is part of Bluebird Library. * Bluebird Library is free software: you can redistribute it and/or modify * it under the terms of the Lesser GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Bluebird Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * Lesser GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Bluebird Library. If not, see <http://www.gnu.org/licenses/>. * * * For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/ * or send an e-mail to: [email protected] * * */ #include "BB.h" #include "HExceptions.h" #include "Hmatrix.h" #include "Dmatrix.cuh" #include "Addition.cuh" #include "Expression.cuh" #include "Promotion.cuh" #include "Scalar.cuh" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define Scalar_Scalar(T1,T2,OpClass,overloaded_operator) template <> typename BB::Promotion<T1,T2>::strongest BB::overloaded_operator(const T1 a,const T2 b) { return BB::OpClass::eval(a,b); } Promotion<int,int2_>::strongest operator+(const int a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<int,float2_>::strongest operator+(const int a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<int,double2_>::strongest operator+(const int a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<float,int2_>::strongest operator+(const float a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<float,float2_>::strongest operator+(const float a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<float,double2_>::strongest operator+(const float a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<double,int2_>::strongest operator+(const double a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<double,float2_>::strongest operator+(const double a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<double,double2_>::strongest operator+(const double a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<int2_,int>::strongest operator+(const int2_ a,const int b) { return BB::Sum::eval(a,b); } Promotion<int2_,float>::strongest operator+(const int2_ a,const float b) { return BB::Sum::eval(a,b); } Promotion<int2_,double>::strongest operator+(const int2_ a,const double b) { return BB::Sum::eval(a,b); } Promotion<int2_,int2_>::strongest operator+(const int2_ a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<int2_,float2_>::strongest operator+(const int2_ a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<int2_,double2_>::strongest operator+(const int2_ a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<float2_,int>::strongest operator+(const float2_ a,const int b) { return BB::Sum::eval(a,b); } Promotion<float2_,float>::strongest operator+(const float2_ a,const float b) { return BB::Sum::eval(a,b); } Promotion<float2_,double>::strongest operator+(const float2_ a,const double b) { return BB::Sum::eval(a,b); } Promotion<float2_,int2_>::strongest operator+(const float2_ a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<float2_,float2_>::strongest operator+(const float2_ a,const 
float2_ b) { return BB::Sum::eval(a,b); } Promotion<float2_,double2_>::strongest operator+(const float2_ a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<double2_,int>::strongest operator+(const double2_ a,const int b) { return BB::Sum::eval(a,b); } Promotion<double2_,float>::strongest operator+(const double2_ a,const float b) { return BB::Sum::eval(a,b); } Promotion<double2_,double>::strongest operator+(const double2_ a,const double b) { return BB::Sum::eval(a,b); } Promotion<double2_,int2_>::strongest operator+(const double2_ a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<double2_,float2_>::strongest operator+(const double2_ a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<double2_,double2_>::strongest operator+(const double2_ a,const double2_ b) { return BB::Sum::eval(a,b); } #define Matrix_Scalar_Matrix(T1,T2,OpClass,overloaded_operator) template <> \ BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const T2 v2) \ { \ BB::Scalar<T2> c(v2); \ typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISHOST); \ } #define Matrix_Scalar_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> \ BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const T2 v2) \ { \ BB::Scalar<T2> c(v2); \ typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISDEVICE); \ } #define Scalar_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Hmatrix<T1> &v2) \ { \ BB::Scalar<T2> c(v1); \ typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISHOST); \ } #define Scalar_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Dmatrix<T1> &v2) \ { \ BB::Scalar<T2> c(v1); \ typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISDEVICE); \ } // Hmatrix-Hmatrix Hmatrix -- TESTED #define Matrix_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \ BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const BB::Hmatrix<T2> &v2) \ { \ if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \ { \ typedef 
BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISHOST); \ } else { char* str0 = "****************************************************************\n"; \ char* str1 = "* Size mismatch in binary CPU matrix operation (matrix-matrix) *\n"; \ char* str2 = "Left operand size: "; \ char* str3 = "Right operand size: "; \ char* str4 = "Operation: "; \ char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \ sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \ throw BB::GenericError(catString,__FILE__,__LINE__); \ } \ } // Hmatrix-Hmatrix Dmatrix -- TESTED #define Matrix_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \ BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const BB::Dmatrix<T2> &v2) \ { \ if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \ { \ typedef BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISDEVICE); \ } else { char* str0 = "****************************************************************\n"; \ char* str1 = "* Size mismatch in binary GPU matrix operation (matrix-matrix) *\n"; \ char* str2 = "Left operand size: "; \ char* str3 = "Right operand size: "; \ char* str4 = "Operation: "; \ char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \ sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \ throw BB::GenericError(catString,__FILE__,__LINE__); \ } \ } Matrix_Matrix_Matrix(T1,T2,Sum,operator+) Matrix_Matrix_CudaMatrix(T1,T2,Sum,operator+) // Hmatrix-Hmatrix Hmatrix -- TESTED #define Matrix_Matrix_Matrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1>&,const BB::Hmatrix<T2>&); // Hmatrix-Hmatrix Dmatrix -- TESTED #define Matrix_Matrix_CudaMatrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1>&,const BB::Dmatrix<T2>&); Matrix_Scalar_Matrix(int,int,Sum,operator+) Matrix_Scalar_Matrix(int,float,Sum,operator+) Matrix_Scalar_Matrix(int,double,Sum,operator+) Matrix_Scalar_Matrix(int,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(int,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(int,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(float,int,Sum,operator+) Matrix_Scalar_Matrix(float,float,Sum,operator+) Matrix_Scalar_Matrix(float,double,Sum,operator+) Matrix_Scalar_Matrix(float,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(float,BB::float2_,Sum,operator+) 
Matrix_Scalar_Matrix(float,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(double,int,Sum,operator+) Matrix_Scalar_Matrix(double,float,Sum,operator+) Matrix_Scalar_Matrix(double,double,Sum,operator+) Matrix_Scalar_Matrix(double,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(double,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(double,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,int,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,float,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,double,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,int,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,float,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,double,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,int,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,float,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,double,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(int,int,Sum,operator+) Matrix_Scalar_CudaMatrix(int,float,Sum,operator+) Matrix_Scalar_CudaMatrix(int,double,Sum,operator+) Matrix_Scalar_CudaMatrix(int,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(int,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(int,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(float,int,Sum,operator+) Matrix_Scalar_CudaMatrix(float,float,Sum,operator+) Matrix_Scalar_CudaMatrix(float,double,Sum,operator+) Matrix_Scalar_CudaMatrix(float,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(float,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(float,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(double,int,Sum,operator+) Matrix_Scalar_CudaMatrix(double,float,Sum,operator+) Matrix_Scalar_CudaMatrix(double,double,Sum,operator+) Matrix_Scalar_CudaMatrix(double,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(double,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(double,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,int,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,float,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,double,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,int,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,float,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,double,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,int,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,float,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,double,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(int,int,Sum,operator+) Scalar_Matrix_Matrix(int,float,Sum,operator+) 
Scalar_Matrix_Matrix(int,double,Sum,operator+) Scalar_Matrix_Matrix(int,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(int,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(int,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(float,int,Sum,operator+) Scalar_Matrix_Matrix(float,float,Sum,operator+) Scalar_Matrix_Matrix(float,double,Sum,operator+) Scalar_Matrix_Matrix(float,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(float,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(float,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(double,int,Sum,operator+) Scalar_Matrix_Matrix(double,float,Sum,operator+) Scalar_Matrix_Matrix(double,double,Sum,operator+) Scalar_Matrix_Matrix(double,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(double,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(double,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,int,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,float,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,double,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,int,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,float,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,double,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,int,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,float,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,double,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(int,int,Sum,operator+) Scalar_Matrix_CudaMatrix(int,float,Sum,operator+) Scalar_Matrix_CudaMatrix(int,double,Sum,operator+) Scalar_Matrix_CudaMatrix(int,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(int,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(int,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(float,int,Sum,operator+) Scalar_Matrix_CudaMatrix(float,float,Sum,operator+) Scalar_Matrix_CudaMatrix(float,double,Sum,operator+) Scalar_Matrix_CudaMatrix(float,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(float,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(float,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(double,int,Sum,operator+) Scalar_Matrix_CudaMatrix(double,float,Sum,operator+) Scalar_Matrix_CudaMatrix(double,double,Sum,operator+) Scalar_Matrix_CudaMatrix(double,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(double,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(double,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,int,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,float,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,double,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,int,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,float,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,double,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,BB::double2_,Sum,operator+) 
Scalar_Matrix_CudaMatrix(BB::double2_,int,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,float,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,double,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,float,Sum,operator+) 
Matrix_Matrix_CudaMatrix_Prototype(double,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::double2_,Sum,operator+)
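// ----------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the library source): the macros above
// only instantiate operator+ for every Hmatrix/Dmatrix/scalar type pairing, so
// client code mixing element types relies on BB::Promotion to choose the result
// type. Assuming Hmatrix exposes a (rows, columns) constructor, something like
//
//   BB::Hmatrix<float> A(16, 16);   // host matrix
//   BB::double2_       s;           // complex double-precision scalar
//   auto expr = A + s;              // lazy BB::Expr node from Matrix_Scalar_Matrix
//
// would build an expression whose element type is
// BB::Promotion<float, BB::double2_>::strongest; as with typical expression
// templates, nothing is evaluated until the expression is assigned back into a
// host or device matrix.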
50b81386888768a526e15fdea57e7014de2a4d25.cu
/* Bluebird Library - High performance CPUs and GPUs computing library. * * Copyright (C) 2012-2013 Orange Owl Solutions. * * This file is part of Bluebird Library. * Bluebird Library is free software: you can redistribute it and/or modify * it under the terms of the Lesser GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Bluebird Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * Lesser GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Bluebird Library. If not, see <http://www.gnu.org/licenses/>. * * * For any request, question or bug reporting please visit http://www.orangeowlsolutions.com/ * or send an e-mail to: [email protected] * * */ #include "BB.h" #include "HExceptions.h" #include "Hmatrix.h" #include "Dmatrix.cuh" #include "Addition.cuh" #include "Expression.cuh" #include "Promotion.cuh" #include "Scalar.cuh" #include <cuda.h> #include <cuda_runtime.h> #define Scalar_Scalar(T1,T2,OpClass,overloaded_operator) template <> typename BB::Promotion<T1,T2>::strongest BB::overloaded_operator(const T1 a,const T2 b) { return BB::OpClass::eval(a,b); } Promotion<int,int2_>::strongest operator+(const int a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<int,float2_>::strongest operator+(const int a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<int,double2_>::strongest operator+(const int a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<float,int2_>::strongest operator+(const float a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<float,float2_>::strongest operator+(const float a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<float,double2_>::strongest operator+(const float a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<double,int2_>::strongest operator+(const double a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<double,float2_>::strongest operator+(const double a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<double,double2_>::strongest operator+(const double a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<int2_,int>::strongest operator+(const int2_ a,const int b) { return BB::Sum::eval(a,b); } Promotion<int2_,float>::strongest operator+(const int2_ a,const float b) { return BB::Sum::eval(a,b); } Promotion<int2_,double>::strongest operator+(const int2_ a,const double b) { return BB::Sum::eval(a,b); } Promotion<int2_,int2_>::strongest operator+(const int2_ a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<int2_,float2_>::strongest operator+(const int2_ a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<int2_,double2_>::strongest operator+(const int2_ a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<float2_,int>::strongest operator+(const float2_ a,const int b) { return BB::Sum::eval(a,b); } Promotion<float2_,float>::strongest operator+(const float2_ a,const float b) { return BB::Sum::eval(a,b); } Promotion<float2_,double>::strongest operator+(const float2_ a,const double b) { return BB::Sum::eval(a,b); } Promotion<float2_,int2_>::strongest operator+(const float2_ a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<float2_,float2_>::strongest operator+(const float2_ a,const float2_ b) { return BB::Sum::eval(a,b); } 
Promotion<float2_,double2_>::strongest operator+(const float2_ a,const double2_ b) { return BB::Sum::eval(a,b); } Promotion<double2_,int>::strongest operator+(const double2_ a,const int b) { return BB::Sum::eval(a,b); } Promotion<double2_,float>::strongest operator+(const double2_ a,const float b) { return BB::Sum::eval(a,b); } Promotion<double2_,double>::strongest operator+(const double2_ a,const double b) { return BB::Sum::eval(a,b); } Promotion<double2_,int2_>::strongest operator+(const double2_ a,const int2_ b) { return BB::Sum::eval(a,b); } Promotion<double2_,float2_>::strongest operator+(const double2_ a,const float2_ b) { return BB::Sum::eval(a,b); } Promotion<double2_,double2_>::strongest operator+(const double2_ a,const double2_ b) { return BB::Sum::eval(a,b); } #define Matrix_Scalar_Matrix(T1,T2,OpClass,overloaded_operator) template <> \ BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const T2 v2) \ { \ BB::Scalar<T2> c(v2); \ typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISHOST); \ } #define Matrix_Scalar_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> \ BB::Expr<BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const T2 v2) \ { \ BB::Scalar<T2> c(v2); \ typedef BB::BinExpr<const T1*,const BB::Scalar<T2>,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),c),v1.GetRows(),v1.GetColumns(),ISDEVICE); \ } #define Scalar_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Hmatrix<T1> &v2) \ { \ BB::Scalar<T2> c(v1); \ typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISHOST); \ } #define Scalar_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <> BB::Expr<BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const T2 v1,const BB::Dmatrix<T1> &v2) \ { \ BB::Scalar<T2> c(v1); \ typedef BB::BinExpr<const BB::Scalar<T2>,const T1*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(c,v2.GetDataPointer()),v2.GetRows(),v2.GetColumns(),ISDEVICE); \ } // Hmatrix-Hmatrix Hmatrix -- TESTED #define Matrix_Matrix_Matrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \ BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1> &v1,const BB::Hmatrix<T2> &v2) \ { \ if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \ { \ typedef BB::BinExpr<const T1*,const 
T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISHOST); \ } else { char* str0 = "****************************************************************\n"; \ char* str1 = "* Size mismatch in binary CPU matrix operation (matrix-matrix) *\n"; \ char* str2 = "Left operand size: "; \ char* str3 = "Right operand size: "; \ char* str4 = "Operation: "; \ char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \ sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \ throw BB::GenericError(catString,__FILE__,__LINE__); \ } \ } // Hmatrix-Hmatrix Dmatrix -- TESTED #define Matrix_Matrix_CudaMatrix(T1,T2,OpClass,overloaded_operator) template <class T1,class T2> \ BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1> &v1,const BB::Dmatrix<T2> &v2) \ { \ if((v1.GetRows() == v2.GetRows()) && (v1.GetColumns() == v2.GetColumns())) \ { \ typedef BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest> BExpr; \ return BB::Expr<BExpr,typename BB::Promotion<T1,T2>::strongest>(BExpr(v1.GetDataPointer(),v2.GetDataPointer()),v1.GetRows(),v1.GetColumns(),ISDEVICE); \ } else { char* str0 = "****************************************************************\n"; \ char* str1 = "* Size mismatch in binary GPU matrix operation (matrix-matrix) *\n"; \ char* str2 = "Left operand size: "; \ char* str3 = "Right operand size: "; \ char* str4 = "Operation: "; \ char* catString = (char*) malloc(2*strlen(str0)+strlen(str1)+strlen(str2)+strlen(str3)+strlen(str4)+50*sizeof(char)); \ sprintf(catString, "%s%s%s\n%s%i x %i\n%s%i x %i\n%s%s",str0,str1,str0,str2,v1.GetRows(),v1.GetColumns(),str3,v2.GetRows(),v2.GetColumns(),str4,typeid(BB::OpClass).name()); \ throw BB::GenericError(catString,__FILE__,__LINE__); \ } \ } Matrix_Matrix_Matrix(T1,T2,Sum,operator+) Matrix_Matrix_CudaMatrix(T1,T2,Sum,operator+) // Hmatrix-Hmatrix Hmatrix -- TESTED #define Matrix_Matrix_Matrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Hmatrix<T1>&,const BB::Hmatrix<T2>&); // Hmatrix-Hmatrix Dmatrix -- TESTED #define Matrix_Matrix_CudaMatrix_Prototype(T1,T2,OpClass,overloaded_operator) template BB::Expr<BB::BinExpr<const T1*,const T2*,BB::OpClass,typename BB::Promotion<T1,T2>::strongest>,typename BB::Promotion<T1,T2>::strongest> BB::overloaded_operator(const BB::Dmatrix<T1>&,const BB::Dmatrix<T2>&); Matrix_Scalar_Matrix(int,int,Sum,operator+) Matrix_Scalar_Matrix(int,float,Sum,operator+) Matrix_Scalar_Matrix(int,double,Sum,operator+) Matrix_Scalar_Matrix(int,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(int,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(int,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(float,int,Sum,operator+) Matrix_Scalar_Matrix(float,float,Sum,operator+) Matrix_Scalar_Matrix(float,double,Sum,operator+) Matrix_Scalar_Matrix(float,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(float,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(float,BB::double2_,Sum,operator+) 
Matrix_Scalar_Matrix(double,int,Sum,operator+) Matrix_Scalar_Matrix(double,float,Sum,operator+) Matrix_Scalar_Matrix(double,double,Sum,operator+) Matrix_Scalar_Matrix(double,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(double,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(double,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,int,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,float,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,double,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(BB::int2_,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,int,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,float,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,double,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(BB::float2_,BB::double2_,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,int,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,float,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,double,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,BB::int2_,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,BB::float2_,Sum,operator+) Matrix_Scalar_Matrix(BB::double2_,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(int,int,Sum,operator+) Matrix_Scalar_CudaMatrix(int,float,Sum,operator+) Matrix_Scalar_CudaMatrix(int,double,Sum,operator+) Matrix_Scalar_CudaMatrix(int,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(int,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(int,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(float,int,Sum,operator+) Matrix_Scalar_CudaMatrix(float,float,Sum,operator+) Matrix_Scalar_CudaMatrix(float,double,Sum,operator+) Matrix_Scalar_CudaMatrix(float,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(float,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(float,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(double,int,Sum,operator+) Matrix_Scalar_CudaMatrix(double,float,Sum,operator+) Matrix_Scalar_CudaMatrix(double,double,Sum,operator+) Matrix_Scalar_CudaMatrix(double,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(double,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(double,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,int,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,float,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,double,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::int2_,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,int,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,float,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,double,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::float2_,BB::double2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,int,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,float,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,double,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,BB::int2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,BB::float2_,Sum,operator+) Matrix_Scalar_CudaMatrix(BB::double2_,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(int,int,Sum,operator+) Scalar_Matrix_Matrix(int,float,Sum,operator+) Scalar_Matrix_Matrix(int,double,Sum,operator+) 
Scalar_Matrix_Matrix(int,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(int,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(int,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(float,int,Sum,operator+) Scalar_Matrix_Matrix(float,float,Sum,operator+) Scalar_Matrix_Matrix(float,double,Sum,operator+) Scalar_Matrix_Matrix(float,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(float,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(float,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(double,int,Sum,operator+) Scalar_Matrix_Matrix(double,float,Sum,operator+) Scalar_Matrix_Matrix(double,double,Sum,operator+) Scalar_Matrix_Matrix(double,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(double,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(double,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,int,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,float,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,double,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(BB::int2_,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,int,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,float,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,double,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(BB::float2_,BB::double2_,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,int,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,float,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,double,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,BB::int2_,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,BB::float2_,Sum,operator+) Scalar_Matrix_Matrix(BB::double2_,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(int,int,Sum,operator+) Scalar_Matrix_CudaMatrix(int,float,Sum,operator+) Scalar_Matrix_CudaMatrix(int,double,Sum,operator+) Scalar_Matrix_CudaMatrix(int,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(int,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(int,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(float,int,Sum,operator+) Scalar_Matrix_CudaMatrix(float,float,Sum,operator+) Scalar_Matrix_CudaMatrix(float,double,Sum,operator+) Scalar_Matrix_CudaMatrix(float,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(float,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(float,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(double,int,Sum,operator+) Scalar_Matrix_CudaMatrix(double,float,Sum,operator+) Scalar_Matrix_CudaMatrix(double,double,Sum,operator+) Scalar_Matrix_CudaMatrix(double,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(double,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(double,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,int,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,float,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,double,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::int2_,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,int,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,float,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,double,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::float2_,BB::double2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,int,Sum,operator+) 
Scalar_Matrix_CudaMatrix(BB::double2_,float,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,double,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,BB::int2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,BB::float2_,Sum,operator+) Scalar_Matrix_CudaMatrix(BB::double2_,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(int,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(float,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(double,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::int2_,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::float2_,BB::double2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,int,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,float,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,double,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::int2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::float2_,Sum,operator+) Matrix_Matrix_Matrix_Prototype(BB::double2_,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(int,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(float,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,double,Sum,operator+) 
Matrix_Matrix_CudaMatrix_Prototype(double,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(double,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::int2_,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::float2_,BB::double2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,int,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,float,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,double,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::int2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::float2_,Sum,operator+) Matrix_Matrix_CudaMatrix_Prototype(BB::double2_,BB::double2_,Sum,operator+)
414a541c55082fcbd47e4bdf2addfd0812cb634c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "adNOC_2Pb_runKernels.cuh" #include <assert.h> RunKernels::RunKernels(char *fname) { readinParamFile(fname); dataInitialization(); } void RunKernels::readinParamFile(char *fname) { FILE* FID = fopen(fname, "r"); if(!FID) { printf("Parameter file %s is missing. Abort...\n", fname); exit(1); } char line[1024]; readParamLine(FID, line, 1024); sscanf(line, "%lf", &TotalSim); readParamLine(FID, line, 1024); sscanf(line, "%lf", &EachOut); readParamLine(FID, line, 1024); sscanf(line, "%lf", &delta0); readParamLine(FID, line, 1024); sscanf(line, "%lf", &Cd); readParamLine(FID, line, 1024); sscanf(line, "%lf", &N_R); readParamLine(FID, line, 1024); sscanf(line, "%lf", &varTheta); readParamLine(FID, line, 1024); sscanf(line, "%lf", &phiS0); readParamLine(FID, line, 1024); sscanf(line, "%s", DEMData); readParamLine(FID, line, 1024); sscanf(line, "%s", IniHData); readParamLine(FID, line, 1024); sscanf(line, "%s", IniUData); readParamLine(FID, line, 1024); sscanf(line, "%s", IniVData); readParamLine(FID, line, 1024); sscanf(line, "%s", locData); } void RunKernels::readParamLine(FILE *fid, char *line, int len) { char *c; fgets(line, len, fid); // Remove the things after '#', or end of line ('\n') for(c = line; *c; c++) { int br = 0; switch(*c) { case '#': case '\n': *c = 0; br = 1; break; default: break; } if(br) break; } return; } void RunKernels::split(const string& s, vector<string>& sv, const char* delim) { sv.clear(); char* buffer = new char[s.size() + 1]; buffer[s.size()] = '\0'; copy(s.begin(), s.end(), buffer); char* p = std::strtok(buffer, delim); do { sv.push_back(p); } while ((p = strtok(NULL, delim))); delete[] buffer; return; } void RunKernels::dataInitialization() { fprintf(stdout,"\n\t***********************************************************************************\n"); fprintf(stdout,"\t\t* * * * * MoSES_2PDF: inflow (debris flow) mode * * * * * \n"); fprintf(stdout,"\t-----------------------------------------------------------------------------------\n"); fprintf(stdout,"\t\t2D Central Scheme (adNOC) Mixture Code in CUDA: inflow mode\n"); fprintf(stdout,"\t\tAuthor : Chi-Jyun Ko, Po-Chih Chen, Hock-Kiet Wong and Yih-Chin Tai\n"); fprintf(stdout,"\t\tLab for Computer Simulation and Visualization (CSV Lab), NCKU, Taiwan\n"); fprintf(stdout,"\t***********************************************************************************\n\n"); ifstream inputFileTopo; ifstream inputFileInitH; ifstream inputFileInitU; ifstream inputFileInitV; ifstream inputFileloc; inputFileTopo.open(DEMData, ios::in); inputFileInitH.open(IniHData, ios::in); inputFileInitU.open(IniUData, ios::in); inputFileInitV.open(IniVData, ios::in); inputFileloc.open(locData, ios::in); fprintf(stdout,"\t\tinput Topo File : %s\n", DEMData); fprintf(stdout,"\t\tinput Initial File (H) : %s\n", IniHData); fprintf(stdout,"\t\tinput Initial File (U) : %s\n", IniUData); fprintf(stdout,"\t\tinput Initial File (V) : %s\n\n", IniVData); // read Topo data if (inputFileTopo.fail() ){ printf("\n\t----------------------------------\n"); printf("\t Error can't open Topo file.\n"); printf("\t----------------------------------\n"); assert(0); } // read init data if (inputFileInitH.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Initial depth file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } if (inputFileInitU.fail() ){ 
printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Initial velocity(U) file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } if (inputFileInitV.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Initial velocity(V) file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } if (inputFileloc.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Inflow location file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } while(getline(inputFileTopo,TopoS)){ inputTopoTmp.push_back(TopoS); } getline(inputFileInitH, IniHS); while(getline(inputFileInitH,IniHS)){ inputIniHTmp.push_back(IniHS); } getline(inputFileInitU, IniUS); while(getline(inputFileInitU,IniUS)){ inputIniUTmp.push_back(IniUS); } getline(inputFileInitV, IniVS); while(getline(inputFileInitV,IniVS)){ inputIniVTmp.push_back(IniVS); } getline(inputFileloc, locS); while(getline(inputFileloc,locS)){ inputlocTmp.push_back(locS); } // read Topo file for(int i=0;i<6;i++){ split(inputTopoTmp[i], Topodata, " "); if(i==0){ NX = stoi(Topodata[1]); } else if(i==1){ NY = stoi(Topodata[1]); } else if(i==2){ xllcorner = stof(Topodata[1]); }else if(i==3){ yllcorner = stof(Topodata[1]); } else if(i==4){ dx = stof(Topodata[1]); dy = stof(Topodata[1]); } else if(i==5){ if(Topodata[0] == "NODATA_value"){ StartTopo = 0; } else{ StartTopo = 1; } } } Iniflowlen = inputIniHTmp.size(); locflowlen = inputlocTmp.size(); nx = NX; ny = NY; dx = dx*10; dy = dy*10; MINX = 0.0; MINY = 0.0; MAXX = dx*(nx-1); MAXY = dy*(ny-1); nxd = nx + 2*MD; nyd = ny + 2*MD; nxyd = max(nxd,nyd); arraySize = nxd * nyd; fprintf(stdout,"\t\tData points : %d,%d\n", nx, ny); fprintf(stdout,"\t\tDomain [dm] : (%5.3f,%5.3f)(%5.3f,%5.3f)\n", MINX, MAXX, MINY, MAXY); fprintf(stdout,"\t\tGrid size : %6.2f,%6.2f (%d,%d)\n",dx,dy,nx,ny); fprintf(stdout,"\t\tCFL number : %5.3f\n", CFL); fprintf(stdout,"\t\tdelta0 : %5.3f\n", delta0); fprintf(stdout,"\t\tCd : %5.3f\n", Cd); fprintf(stdout,"\t\tN_R : %5.3f\n", N_R); fprintf(stdout,"\t\tvarTheta : %5.3f\n", varTheta); fprintf(stdout,"\t\tinitial value of solid volume fraction : %5.3f\n\n", phiS0); fprintf(stdout,"\t\tTotal simulation time (sec) : %5.3f\n", TotalSim); fprintf(stdout,"\t\tEach output time (sec) : %5.3f\n\n", EachOut); NEW_MATRIX(topo, double, nxd, nyd); NEW_MATRIX(depth, double, nxd, nyd); NEW_MATRIX(inputLoc, double, 3, locflowlen); NEW_3DMATRIX(inputFlow, double, locflowlen, Iniflowlen, 3); NEW_ARRAY(inflowTime, double, Iniflowlen); NEW_MATRIX(dire, int, 3, locflowlen); // input Topo to matrix if(StartTopo==1){ for(int j=0; j<NY; j++) { split(inputTopoTmp[j+5], Topodata, " "); for(int i=0; i<NX; i++) { topo[i+MD][j+MD] = stof(Topodata[i]); topo[i+MD][j+MD] = 10.0*topo[i+MD][j+MD]; if(topo[i+MD][j+MD]<0){ topo[i+MD][j+MD] = 0; } } } } else{ for(int j=0; j<NY; j++) { split(inputTopoTmp[j+6], Topodata, " "); for(int i=0; i<NX; i++) { topo[i+MD][j+MD] = stof(Topodata[i]); topo[i+MD][j+MD] = 10.0*topo[i+MD][j+MD]; if(topo[i+MD][j+MD]<0){ topo[i+MD][j+MD] = 0; } } } } // input Initial flow inflowSize = Iniflowlen*locflowlen; for(int j=0; j<Iniflowlen; j++) { split(inputIniHTmp[j], IniHdata, "\t"); split(inputIniUTmp[j], IniUdata, "\t"); split(inputIniVTmp[j], IniVdata, "\t"); inflowTime[j] = stof(IniHdata[0])*10; for(int i=0; i<locflowlen; i++) { inputFlow[i][j][0] = 10.0*stof(IniHdata[i+1]); 
inputFlow[i][j][1] = stof(IniUdata[i+1]); inputFlow[i][j][2] = stof(IniVdata[i+1]); } } // input Initial flow location for(int j=0; j<locflowlen; j++) { split(inputlocTmp[j], locdata, "\t"); // cout <<stof(IniHdata[0]) <<" "; for(int i=0; i<3; i++) { inputLoc[i][j] = stof(locdata[i]); // cout<<inputLoc[i][j] <<" "; } // cout <<"\n"; } // B.C. initioal condition for(int i=0; i<MD; i++) { for(int j=MD; j<(ny+MD); j++) { topo[i ][j] = topo[MD ][j]; topo[nx+MD+i][j] = topo[nx+MD-1][j]; } } for(int j=0; j<MD; j++) { for(int i=0; i<(nx+2*MD); i++) { topo[i][j ] = topo[i][MD ]; topo[i][ny+MD+j] = topo[i][ny+MD-1]; } } } clock_t RunKernels::run() { hipError_t errhost ; errhost = hipHostMalloc((void **)&TotalStep_h, sizeof(double)); errhost = hipHostMalloc((void **)&dt_h, sizeof(double)); errhost = hipHostMalloc((void **)&depth_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&topo_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&speed_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultHs_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultHf_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultUs_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultVs_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultUf_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultVf_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&resultphi_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&bfkt_h, sizeof(double) * arraySize * 3); errhost = hipHostMalloc((void **)&svec_h, sizeof(double) * arraySize * 2); errhost = hipHostMalloc((void **)&cvalue_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&result_h, sizeof(double) * arraySize); errhost = hipHostMalloc((void **)&inflow_h, sizeof(double) * inflowSize * 3); errhost = hipHostMalloc((void **)&loc_h, sizeof(double) * locflowlen * 3); errhost = hipHostMalloc((void **)&dire_h, sizeof(int) * locflowlen * 3); if(errhost != hipSuccess){ printf("\nCould not allocate Host memory : %d\n",errhost); } for(int i = 0; i < nxd; i++){ for(int j = 0; j < nyd; j++){ topo_h[j*nxd+i] = topo[i][j]; // depth_h[j*nxd+i] = depth[i][j]; } } for(int m=0;m<3;m++){ for(int j = 0; j < Iniflowlen; j++){ for(int i = 0; i < locflowlen; i++){ inflow_h[m * Iniflowlen * locflowlen + j*locflowlen + i] = inputFlow[i][j][m]; } } } for(int j=0;j<locflowlen;j++){ for(int i=0;i<3;i++){ if((inputLoc[2][j] == 0) || (inputLoc[2][j] == NY)){ dire[0][j] = inputLoc[0][j]; dire[1][j] = -5; //dire[0] direction in X dire[2][j] = 0; //dire[1] direction in Y }else{ dire[0][j] = inputLoc[0][j]; dire[1][j] = 0; dire[2][j] = -5; } // cout<<dire[i][j]<<" "; } // cout<<endl; } for(int i = 0; i < 3; i++){ for(int j = 0; j < locflowlen; j++){ loc_h[j*3+i] = inputLoc[i][j]; dire_h[j*3+i] = dire[i][j]; } } clock_t start, end; start = clock(); //cuda start memoryMalloc(); kernelStep(); freeMemory(); end = clock(); //cuda stop // outputFile(); return end - start; } void RunKernels::kernelStep() { hipError_t errMem, errCpy ; errMem = hipMemset(dev_TotalTime, 0.0, sizeof(double)); errMem = hipMemset(dev_dt, 0.0, sizeof(double)); errMem = hipMemset(dev_dtval, 0.0, sizeof(double)); errMem = hipMemset(dt_h, 0.0, sizeof(double)); if(errMem != hipSuccess){ printf("\nError cuda Memory set : %d\n",errMem); } errCpy = hipMemcpy(dev_topo, topo_h, sizeof(double) * arraySize, hipMemcpyHostToDevice); errCpy = hipMemcpy(dev_inflow, inflow_h, 
sizeof(double) * (inflowSize * 3), hipMemcpyHostToDevice); errCpy = hipMemcpy(dev_loc, loc_h, sizeof(double) * (locflowlen * 3), hipMemcpyHostToDevice); errCpy = hipMemcpy(dev_dire, dire_h, sizeof(int) * (locflowlen * 3), hipMemcpyHostToDevice); int bx = (nxd + BLOCK_SIZE - 1) / BLOCK_SIZE; int by = (nyd + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 blocksPerGrid(bx, by); dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); double Htmp, hstmp, hftmp, ustmp, uftmp, vstmp, vftmp, phitmp; int outputStep = TotalSim/EachOut; double tf[outputStep+1] = {0.0}; for(int ii=1;ii<=outputStep;ii++){ tf[ii] = ii*EachOut*10; } double outtime[outputStep+1]={0}; int iter = 1, nt, io; int nstop = 0, schreiben =0; // int Totalnt = 0; int outsteplen = sizeof(tf)/sizeof(tf)[0]; int inflowCount =0; hipLaunchKernelGGL(( makeTopo1Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_bfkt, MINX, MINY, dx, dy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_topo, dev_bfkt, dx, dy, nxd, nyd, nx, ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo3Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_bfkt, dev_posx, dev_posy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo4Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_bfkt, dev_posx, dev_posy, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_dbdx, dev_dbdy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo5Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dbdx, dev_dbdy, dev_cvalue, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo6Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dbdx, dev_dbdy, dev_cvalue, dev_svec, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo7Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dbdx, dev_dbdy, dev_cvalue, dev_svec, dev_Jacb31, dev_Jacb32, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_dettmp, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo8Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_cvalue, dev_svec, dev_Jacb31, dev_Jacb32, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_dettmp, dev_Detmin, dev_i_ddxi11, dev_i_ddxi12, dev_i_ddxi21, dev_i_ddxi22, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo9Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_cvalue, dev_svec, dev_i_ddxi11, dev_i_ddxi12, dev_i_ddxi21, dev_i_ddxi22, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( makeTopo11Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_tande, delta0, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary1Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_cvalue, dev_Detmin, dev_svec, dev_Jacb31, dev_Jacb32, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, nxd, nyd, nx ,ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_cvalue, dev_Detmin, dev_svec, dev_Jacb31, dev_Jacb32, dev_invJ11, dev_invJ12, dev_invJ13, 
dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, nxd, nyd, nx ,ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( JacobKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_svec, dev_cvalue, dev_posx, dev_posy, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary3Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, nxd, nyd, nx ,ny); hipDeviceSynchronize(); errCpy = hipMemcpy(bfkt_h, &dev_bfkt[0], sizeof(double)* arraySize * 3, hipMemcpyDeviceToHost); errCpy = hipMemcpy(svec_h, &dev_svec[0], sizeof(double)* arraySize * 2, hipMemcpyDeviceToHost); errCpy = hipMemcpy(cvalue_h, &dev_cvalue[0], sizeof(double)* arraySize, hipMemcpyDeviceToHost); FILE *fpTopo; if ((fpTopo=fopen("./result2Pb/DEM.dat", "w")) == NULL) { printf("\n\t---------------------------------------------------------\n"); printf("\t Error can't open \"result2Pb\" folder.\n"); printf("\t Need build the result directory --> mkdir result2Pb\n"); printf("\t---------------------------------------------------------\n"); fclose(fpTopo); exit(0); } fprintf(fpTopo, "VARIABLES = \"x\", \"y\", \"z\", \"c\", \"S1\", \"S2\"\n "); for (int i=MD;i<nxd-MD;i++) { for (int j=MD;j<nyd-MD;j++) { fprintf(fpTopo, "%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\n",bfkt_h[0 * nxd * nyd + j * nxd + i]*0.1, bfkt_h[1 * nxd * nyd + j * nxd + i]*0.1, bfkt_h[2 * nxd * nyd + j * nxd + i]*0.1, cvalue_h[j * nxd + i],svec_h[0 * nxd * nyd + j * nxd + i],svec_h[1 * nxd * nyd + j * nxd + i]); } } fclose(fpTopo); FILE *fpInit; if ((fpInit=fopen("./result2Pb/001.dat", "w")) == NULL) { printf("\n\t---------------------------------------------------------\n"); printf("\t Error can't open \"result2Pb\" folder.\n"); printf("\t---------------------------------------------------------\n"); fclose(fpInit); exit(0); } fprintf(fpInit, "VARIABLES = \"H\", \"phi\", \"Us\", \"Uf\", \"Vs\", \"Vf\"\n "); for (int i=MD;i<nxd-MD;i++) { for (int j=MD;j<nyd-MD;j++) { fprintf(fpInit, "%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\n",0.1*depth[i][j],phiS0, 0.0, 0.0, 0.0, 0.0); } } fclose(fpInit); FILE *fpInfo; if ((fpInfo=fopen("./result2Pb/Info.dat", "w")) == NULL) { printf("\n\t---------------------------------------------------------\n"); printf("\t Error can't open \"result2Pb\" folder.\n"); printf("\t---------------------------------------------------------\n"); fclose(fpInfo); exit(0); } fprintf(fpInfo, "VARIABLES = \"x-point\", \"y-point\", \"dx\", \"dy\", \"xllcorner\", \"yllcorner\", \"TotalStep\"\n "); fprintf(fpInfo, "\t%d\t\t %d\t %10.2f\t %10.2f\t %10.4f\t %10.4f\t %d", NX, NY, (dx*0.1), (dy*0.1), xllcorner, yllcorner,(outputStep+1)); fclose(fpInfo); hipFree(dev_i_ddxi11); hipFree(dev_i_ddxi12); hipFree(dev_i_ddxi21); hipFree(dev_i_ddxi22); hipFree(dev_dettmp); hipFree(dev_bfkt); hipLaunchKernelGGL(( MeanKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dxdxi11, dev_dxdxi21, dev_dxdxi12, dev_dxdxi22, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, dev_Detmin, dev_cvalue, dev_svec, dev_dxdxi11_avgEW, dev_dxdxi21_avgEW, dev_dxdxi12_avgSN, dev_dxdxi22_avgSN, dev_J13dxi_avgEW, dev_J23dxi_avgEW, dev_J33dxi_avgEW, dev_J13det_avgEW, dev_J23det_avgEW, dev_J33det_avgEW, dev_J13dxi_avgSN, dev_J23dxi_avgSN, 
dev_J33dxi_avgSN, dev_J13det_avgSN, dev_J23det_avgSN, dev_J33det_avgSN, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_invJ31_avgEW, dev_invJ32_avgEW, dev_invJ33_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_invJ31_avgSN, dev_invJ32_avgSN, dev_invJ33_avgSN, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_cval_avgEW, dev_cval_avgSN, dev_svec_avgEW, dev_svec_avgSN, nxd, nyd); hipDeviceSynchronize(); for (nt = 1; (!nstop) && (nt<100000); nt++){ hipLaunchKernelGGL(( Inflow1Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_inflow, dev_loc, dev_u, dev_cvalue, phiS0, locflowlen, Iniflowlen, inflowCount, dev_dire, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Inflow2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_inflow, dev_loc, dev_u, locflowlen, Iniflowlen, inflowCount, dev_dire, nxd, nyd); hipDeviceSynchronize(); for (io=0; io<2; io++){ if(io == 0){ hipLaunchKernelGGL(( UzeroKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_uzero, nxd, nyd); hipDeviceSynchronize(); } hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary5Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, nxd, nyd, nx ,ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary6Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, nxd, nyd, nx ,ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary7Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, nxd, nyd, nx ,ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( Boundary9Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_Hpx, dev_Hpy, dev_Ppx, dev_Ppy, dev_PDx, dev_PDy, dev_ux, dev_uy, dev_apEW, dev_apSN, dev_apFEW, dev_apFSN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( TVD1Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_dux, dev_duy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( TVD2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dux, dev_duy, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( TVD3Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_dux, dev_duy, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, dev_t1x, dev_t2x, dev_t1y, dev_t2y, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( TVD4Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_t1x, dev_t2x, dev_t1y, dev_t2y, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( TVD5Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_t1x, dev_t2x, dev_t1y, dev_t2y, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, dev_ux, dev_uy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( InterfacesKernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_ux, dev_uy, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Interfaces2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd,nx ,ny); hipDeviceSynchronize(); hipLaunchKernelGGL(( KeepPositivi1Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( KeepPositivi2Kernel) , 
dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux1Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_vexE, dev_veyE, dev_vexW, dev_veyW, dev_vexFE, dev_veyFE, dev_vexFW, dev_veyFW, dev_vexN, dev_veyN, dev_vexS, dev_veyS, dev_vexFN, dev_veyFN, dev_vexFS, dev_veyFS, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_vexE, dev_veyE, dev_vexW, dev_veyW, dev_vexFE, dev_veyFE, dev_vexFW, dev_veyFW, dev_vexN, dev_veyN, dev_vexS, dev_veyS, dev_vexFN, dev_veyFN, dev_vexFS, dev_veyFS, dev_w_wertE, dev_w_wertW, dev_w_wertFE, dev_w_wertFW, dev_w_wertN, dev_w_wertS, dev_w_wertFN, dev_w_wertFS, dev_svec, dev_cvalue, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux3Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_vexE, dev_veyE, dev_vexW, dev_veyW, dev_vexFE, dev_veyFE, dev_vexFW, dev_veyFW, dev_vexN, dev_veyN, dev_vexS, dev_veyS, dev_vexFN, dev_veyFN, dev_vexFS, dev_veyFS, dev_w_wertE, dev_w_wertW, dev_w_wertFE, dev_w_wertFW, dev_w_wertN, dev_w_wertS, dev_w_wertFN, dev_w_wertFS, dev_q_xiE , dev_q_etE, dev_q_xiW , dev_q_etW, dev_q_xiFE, dev_q_etFE, dev_q_xiFW, dev_q_etFW, dev_NpressFE, dev_NpressFW, dev_M11EW, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_cval_avgEW, dev_q_xiN , dev_q_etN, dev_q_xiS , dev_q_etS, dev_q_xiFN, dev_q_etFN, dev_q_xiFS, dev_q_etFS, dev_NpressFN, dev_NpressFS, dev_M22SN, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_cval_avgSN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux4Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_q_xiE , dev_q_xiW, dev_q_xiFE, dev_q_xiFW, dev_NpressFE, dev_NpressFW, dev_invJ11_avgEW, dev_apE, dev_apW, dev_apFE, dev_apFW, dev_q_etN , dev_q_etS, dev_q_etFN, dev_q_etFS, dev_NpressFN, dev_NpressFS, dev_invJ22_avgSN, dev_apN, dev_apS, dev_apFN, dev_apFS, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux5Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_apE, dev_apW, dev_apFE, dev_apFW, dev_apEW , dev_apFEW, dev_apN, dev_apS, dev_apFN, dev_apFS, dev_apSN, dev_apFSN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux6Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_apEW, dev_apFEW, dev_apSN, dev_apFSN, dev_em_x , dev_em_y, dev_em_Fx, dev_em_Fy, dev_czw1x, dev_czw2x, dev_czwF1x,dev_czwF2x, dev_czw1y, dev_czw2y, dev_czwF1y,dev_czwF2y, dev_uE, dev_uW, dev_uN, dev_uS, dev_cval_avgEW, dev_cval_avgSN, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_M11EW, dev_M22SN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux7Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_FpE, dev_FpW, dev_GpN, dev_GpS, dev_czw1x, dev_czw2x, dev_czwF1x,dev_czwF2x, dev_czw1y, dev_czw2y, dev_czwF1y,dev_czwF2y, dev_uE, dev_uW, dev_uN, dev_uS, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_q_xiE, dev_q_xiFE, dev_q_xiW, dev_q_xiFW, dev_q_etN, dev_q_etFN, dev_q_etS, dev_q_etFS, dev_dxdxi11_avgEW, dev_dxdxi21_avgEW, dev_dxdxi12_avgSN, dev_dxdxi22_avgSN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( CFL1Kernel) , dim3(blocksPerGrid), 
dim3(threadsPerBlock), 0, 0, dev_result, dev_em_x, dev_em_y, dev_em_Fx, dev_em_Fy, dev_em_valS, dev_em_valF, dx, dy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( CFL2Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_em_valS, dev_em_valF, dev_Val, nxd, nyd); hipDeviceSynchronize(); if(io==0){ int threads = 256; int blocks = 256;//min((arraySize + threads - 1) / threads, 256); hipLaunchKernelGGL(( reduceKernel) , dim3(blocks), dim3(threads), 0, 0, dev_Val, dev_max, arraySize); hipDeviceSynchronize(); hipLaunchKernelGGL(( reduceKernel) , dim3(1), dim3(blocks), 0, 0, dev_max, dev_maxW, blocks); hipDeviceSynchronize(); hipLaunchKernelGGL(( CFL3Kernel) , dim3(1), dim3(threadsPerBlock), 0, 0, dev_result, dev_dtval, dev_maxW, dev_TotalTime); hipDeviceSynchronize(); errCpy = hipMemcpy(dt_h, dev_dtval, sizeof(double)* 1, hipMemcpyDeviceToHost); errCpy = hipMemcpy(TotalStep_h, dev_TotalTime, sizeof(double)* 1, hipMemcpyDeviceToHost); if((*TotalStep_h + *dt_h) >= tf[iter]){ *dt_h = tf[iter] - *TotalStep_h; schreiben = 1; errCpy = hipMemcpy(dev_dtval, dt_h, sizeof(double)* 1, hipMemcpyHostToDevice); iter++; } if(*TotalStep_h >= inflowTime[inflowCount] && inflowCount < (Iniflowlen-1) ){ inflowCount++; } hipDeviceSynchronize(); fprintf(stdout, "\r\t\tSimulation progress: %3.0f %%", (*TotalStep_h*0.1/TotalSim)*100); fflush(stdout); } hipLaunchKernelGGL(( CFL4Kernel) , dim3(1), dim3(threadsPerBlock), 0, 0, dev_result, dev_dt, dev_dtval,dev_TotalTime,io); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux8Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_Hpx, dev_Hpy, dev_Ppx, dev_Ppy, dev_FpE, dev_FpW, dev_GpN, dev_GpS, dev_apEW, dev_apFEW, dev_apSN, dev_apFSN, dev_uE, dev_uW, dev_uN, dev_uS, dev_u, dev_ux, dev_uy, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_cval_avgEW, dev_cval_avgSN, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_invJ31_avgEW, dev_invJ32_avgEW, dev_invJ33_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_invJ31_avgSN, dev_invJ32_avgSN, dev_invJ33_avgSN, dev_dudxE, dev_dvdxE, dev_dudyE, dev_dvdyE, dev_dudxN, dev_dvdxN, dev_dudyN, dev_dvdyN, dx, dy, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux9Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_duxidxix, dev_dvetdxix, dev_duxidetx, dev_dvetdetx, dev_duxidxiy, dev_dvetdxiy, dev_duxidety, dev_dvetdety, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_invJ31_avgEW, dev_invJ32_avgEW, dev_invJ33_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_invJ31_avgSN, dev_invJ32_avgSN, dev_invJ33_avgSN, dev_dudxE, dev_dvdxE, dev_dudyE, dev_dvdyE, dev_dudxN, dev_dvdxN, dev_dudyN, dev_dvdyN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux10Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_PDx, dev_PDy, dev_uE, dev_uW, dev_uN, dev_uS, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_duxidxix, dev_dvetdxix, dev_duxidetx, dev_dvetdetx, dev_duxidxiy, dev_dvetdxiy, dev_duxidety, dev_dvetdety, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux11Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, 
dev_vex, dev_vey, dev_vexF, dev_veyF, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux12Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_vexF, dev_veyF, dev_svec, dev_cvalue, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux13Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_usw, dev_vel, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux14Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_vexF, dev_veyF, dev_vexw, dev_veyw, dev_usw, dev_vel, dev_q_xi, dev_q_et, dev_q_xiF, dev_q_etF, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux15Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_vexF, dev_veyF, dev_q_xi, dev_q_et, dev_q_xiF, dev_q_etF, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, dev_Ac, dev_AcF, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux16Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_Npress1, dev_Npress2, dev_NpressF, dev_Ac, dev_AcF, dev_Detmin, dev_cvalue, dev_u, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux17Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_Npress1, dev_Npress2, dev_NpressF, dev_Ac, dev_AcF, dev_Detmin, dev_svec, dev_vex, dev_vey, dev_vexw, dev_veyw, dev_vexF, dev_veyF, dev_tande, dev_u, dev_s, Cd, N_R, varTheta, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux18Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_v, dev_Detmin, dev_Hpx, dev_Hpy, dev_Ppx, dev_Ppy, dev_PDx, dev_PDy, dev_s, dev_u, dev_uzero, dev_dt, dx, dy, N_R, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux19Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_v, dev_Detmin, dev_u, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux20Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_utmp, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux21Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, nxd, nyd); hipDeviceSynchronize(); if(io == 0) { hipLaunchKernelGGL(( Flux22Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_uone, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux23Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_uzero, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux24Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_uone, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); hipDeviceSynchronize(); } else{ hipLaunchKernelGGL(( Flux25Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_utwo, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux26Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_uone, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux27Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 
0, dev_result, dev_u, dev_utwo, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); hipDeviceSynchronize(); hipLaunchKernelGGL(( Flux28Kernel) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_result, dev_u, dev_utwo, nxd, nyd); hipDeviceSynchronize(); } if(io) { if(schreiben==1){ errCpy = hipMemcpy(resultHs_h, &dev_u[0 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); errCpy = hipMemcpy(resultHf_h, &dev_u[3 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); errCpy = hipMemcpy(resultUs_h, &dev_u[1 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); errCpy = hipMemcpy(resultVs_h, &dev_u[2 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); errCpy = hipMemcpy(resultUf_h, &dev_u[4 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); errCpy = hipMemcpy(resultVf_h, &dev_u[5 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); errCpy = hipMemcpy(resultphi_h,&dev_u[6 * nyd * nxd], sizeof(double)* arraySize, hipMemcpyDeviceToHost); char outfile_Web[100]; sprintf(outfile_Web,"./result2Pb/%03d.dat",iter); outtime[iter-1] = *TotalStep_h; FILE *fpTmp; fpTmp=fopen("./result2Pb/Time.dat", "w"); for (int nn=0;nn<(iter);nn++){ fprintf(fpTmp, "%20.4f", outtime[nn]); } fclose(fpTmp); FILE *fpout; fpout=fopen(outfile_Web, "w"); fprintf(fpout, "VARIABLES = \"H\", \"phi\", \"Us\", \"Uf\", \"Vs\", \"Vf\"\n "); for (int i=MD;i<nxd-MD;i++) { for (int j=MD;j<nyd-MD;j++) { Htmp = 0.1*(resultHs_h[j * nxd + i] + resultHf_h[j * nxd + i]); hstmp = resultHs_h[j * nxd + i]; hftmp = resultHf_h[j * nxd + i]; if (hstmp > 0.00001){ vstmp = resultVs_h[j * nxd + i]/resultHs_h[j * nxd + i]; ustmp = resultUs_h[j * nxd + i]/resultHs_h[j * nxd + i]; phitmp = resultphi_h[j * nxd + i]; }else{ vstmp = 0.0; ustmp = 0.0; phitmp = 0.0; } if (hftmp > 0.00001){ vftmp = resultVf_h[j * nxd + i]/resultHf_h[j * nxd + i]; uftmp = resultUf_h[j * nxd + i]/resultHf_h[j * nxd + i]; }else{ vftmp = 0.0; uftmp = 0.0; } fprintf(fpout, "%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\n",Htmp,phitmp,ustmp,uftmp,vstmp,vftmp); } } fclose(fpout); if(iter == outsteplen){ nstop = 1; // Totalnt = nt; } // } schreiben = 0; } } } hipDeviceSynchronize(); } if (hipPeekAtLastError() != hipSuccess) { cout << hipGetErrorString(hipPeekAtLastError()) << endl; } errCpy = hipMemcpy(TotalStep_h, dev_TotalTime, sizeof(double)* 1, hipMemcpyDeviceToHost); cout << "\nTotal time : " << *TotalStep_h/10 << " sec "; fprintf(stdout, "\nTotal number of steps: %d\n", nt); if(errCpy != hipSuccess){ printf("\nError cuda Memory copy : %d\n",errCpy); } } void RunKernels::memoryMalloc() { hipError_t errdevice ; errdevice = hipMalloc((void **)&dev_topo, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_depth, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_result, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_bfkt, sizeof(double) * arraySize * 3); errdevice = hipMalloc((void **)&dev_posx, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_posy, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi11, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi12, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi21, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi22, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dbdx , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dbdy , 
sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_cvalue, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_svec , sizeof(double) * arraySize * 2); errdevice = hipMalloc((void **)&dev_Jacb31, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Jacb32, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dettmp, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Detmin, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_i_ddxi11, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_i_ddxi12, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_i_ddxi21, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_i_ddxi22, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ11, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ12, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ13, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ21, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ22, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ23, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ31, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ32, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ33, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_u , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uzero , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_Hpx , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_Hpy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_Ppx , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_Ppy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_PDx , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_PDy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_ux , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_dux , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_duy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_t1x , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_t2x , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_t1y , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_t2y , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_sgnAx , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_sgnBx , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_sgnAy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_sgnBy , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uE , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uW , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uN , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uS , sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_tande , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J13dxi , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J23dxi , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J33dxi , sizeof(double) * arraySize); errdevice = hipMalloc((void 
**)&dev_J13det , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J23det , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J33det , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apFEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apFSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi11_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi21_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi12_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dxdxi22_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J13dxi_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J23dxi_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J33dxi_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J13det_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J23det_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J33det_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J13dxi_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J23dxi_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J33dxi_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J13det_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J23det_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_J33det_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ11_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ12_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ13_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ21_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ22_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ23_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ31_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ32_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ33_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ11_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ12_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ13_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ21_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ22_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ23_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ31_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ32_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_invJ33_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Detmin_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Detmin_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_cval_avgEW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_cval_avgSN , sizeof(double) * arraySize); errdevice = hipMalloc((void 
**)&dev_svec_avgEW , sizeof(double) * arraySize * 2); errdevice = hipMalloc((void **)&dev_svec_avgSN , sizeof(double) * arraySize * 2); errdevice = hipMalloc((void **)&dev_vexE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_NpressFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_NpressFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_M11EW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_NpressFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_NpressFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_M22SN , sizeof(double) * arraySize); errdevice = hipMalloc((void 
**)&dev_apE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apFE , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apFW , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apFN , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_apFS , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_em_x , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_em_y , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_em_Fx, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_em_Fy, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_FpE, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_FpW, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_GpN, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_GpS, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_czw1x , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czw2x , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czwF1x, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czwF2x, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czw1y , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czw2y , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czwF1y, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_czwF2y, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_em_valS, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_em_valF, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Val , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dudxE, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvdxE, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dudyE, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvdyE, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dudxN, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvdxN, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dudyN, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvdyN, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_duxidxix, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvetdxix, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_duxidetx, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvetdetx, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_duxidxiy, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvetdxiy, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_duxidety, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_dvetdety, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vex , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vey , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexF , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyF , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wert , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_w_wertF, sizeof(double) * arraySize); errdevice = 
hipMalloc((void **)&dev_usw , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vel , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_vexw , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_veyw , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xi , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_et , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_xiF , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_q_etF , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Ac , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_AcF , sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Npress1, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_Npress2, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_NpressF, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_s, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_v, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_uone, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_utwo, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_usxnew, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_ufxnew, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_usxold, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_ufxold, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_usynew, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_ufynew, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_usyold, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_ufyold, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_utmp, sizeof(double) * arraySize * 7); errdevice = hipMalloc((void **)&dev_waveSpeed, sizeof(double) * arraySize); errdevice = hipMalloc((void **)&dev_max, sizeof(double) * 256); errdevice = hipMalloc((void **)&dev_maxW, sizeof(double) * 1); errdevice = hipMalloc((void **)&dev_TotalTime, sizeof(double) * 1); errdevice = hipMalloc((void **)&dev_dt, sizeof(double) * 1); errdevice = hipMalloc((void **)&dev_dtval, sizeof(double) * 1); errdevice = hipMalloc((void **)&dev_inflow, sizeof(double) * (inflowSize * 3)); errdevice = hipMalloc((void **)&dev_loc, sizeof(double) * (locflowlen * 3)); errdevice = hipMalloc((void **)&dev_dire, sizeof(int) * (locflowlen * 3)); if(errdevice != hipSuccess){ printf("\nCould not allocate Device memory : %d\n",errdevice); } } void RunKernels::freeMemory() { hipFree(dev_topo); hipFree(dev_depth); hipFree(dev_result); // hipFree(dev_bfkt); hipFree(dev_posx); hipFree(dev_posy); hipFree(dev_dxdxi11); hipFree(dev_dxdxi12); hipFree(dev_dxdxi21); hipFree(dev_dxdxi22); hipFree(dev_dbdx); hipFree(dev_dbdy); hipFree(dev_cvalue); hipFree(dev_svec); hipFree(dev_Jacb31); hipFree(dev_Jacb32); // hipFree(dev_dettmp); hipFree(dev_Detmin); // hipFree(dev_i_ddxi11); // hipFree(dev_i_ddxi12); // hipFree(dev_i_ddxi21); // hipFree(dev_i_ddxi22); hipFree(dev_invJ11); hipFree(dev_invJ12); hipFree(dev_invJ13); hipFree(dev_invJ21); hipFree(dev_invJ22); hipFree(dev_invJ23); hipFree(dev_invJ31); hipFree(dev_invJ32); hipFree(dev_invJ33); hipFree(dev_u); hipFree(dev_uzero); hipFree(dev_tande); hipFree(dev_J13dxi); hipFree(dev_J23dxi); hipFree(dev_J33dxi); hipFree(dev_J13det); hipFree(dev_J23det); hipFree(dev_J33det); hipFree(dev_Hpx); hipFree(dev_Hpy); hipFree(dev_Ppx); 
hipFree(dev_Ppy); hipFree(dev_PDx); hipFree(dev_PDy); hipFree(dev_ux); hipFree(dev_uy); hipFree(dev_apEW); hipFree(dev_apSN); hipFree(dev_apFEW); hipFree(dev_apFSN); hipFree(dev_dux); hipFree(dev_duy); hipFree(dev_t1x); hipFree(dev_t2x); hipFree(dev_t1y); hipFree(dev_t2y); hipFree(dev_sgnAx); hipFree(dev_sgnBx); hipFree(dev_sgnAy); hipFree(dev_sgnBy); hipFree(dev_dxdxi11_avgEW); hipFree(dev_dxdxi21_avgEW); hipFree(dev_dxdxi12_avgSN); hipFree(dev_dxdxi22_avgSN); hipFree(dev_J13dxi_avgEW); hipFree(dev_J23dxi_avgEW); hipFree(dev_J33dxi_avgEW); hipFree(dev_J13det_avgEW); hipFree(dev_J23det_avgEW); hipFree(dev_J33det_avgEW); hipFree(dev_J13dxi_avgSN); hipFree(dev_J23dxi_avgSN); hipFree(dev_J33dxi_avgSN); hipFree(dev_J13det_avgSN); hipFree(dev_J23det_avgSN); hipFree(dev_J33det_avgSN); hipFree(dev_invJ11_avgEW); hipFree(dev_invJ12_avgEW); hipFree(dev_invJ13_avgEW); hipFree(dev_invJ21_avgEW); hipFree(dev_invJ22_avgEW); hipFree(dev_invJ23_avgEW); hipFree(dev_invJ31_avgEW); hipFree(dev_invJ32_avgEW); hipFree(dev_invJ33_avgEW); hipFree(dev_invJ11_avgSN); hipFree(dev_invJ12_avgSN); hipFree(dev_invJ13_avgSN); hipFree(dev_invJ21_avgSN); hipFree(dev_invJ22_avgSN); hipFree(dev_invJ23_avgSN); hipFree(dev_invJ31_avgSN); hipFree(dev_invJ32_avgSN); hipFree(dev_invJ33_avgSN); hipFree(dev_Detmin_avgEW); hipFree(dev_Detmin_avgSN); hipFree(dev_cval_avgEW); hipFree(dev_cval_avgSN); hipFree(dev_svec_avgEW); hipFree(dev_svec_avgSN); hipFree(dev_uE); hipFree(dev_uW); hipFree(dev_uN); hipFree(dev_uS); hipFree(dev_vexE); hipFree(dev_vexW); hipFree(dev_veyE); hipFree(dev_veyW); hipFree(dev_w_wertE); hipFree(dev_w_wertW); hipFree(dev_vexFE); hipFree(dev_vexFW); hipFree(dev_veyFE); hipFree(dev_veyFW); hipFree(dev_w_wertFE); hipFree(dev_w_wertFW); hipFree(dev_vexN); hipFree(dev_vexS); hipFree(dev_veyN); hipFree(dev_veyS); hipFree(dev_w_wertFN); hipFree(dev_w_wertFS); hipFree(dev_q_xiE); hipFree(dev_q_etE); hipFree(dev_q_xiW); hipFree(dev_q_etW); hipFree(dev_q_xiFE); hipFree(dev_q_etFE); hipFree(dev_q_xiFW); hipFree(dev_q_etFW); hipFree(dev_NpressFE); hipFree(dev_NpressFW); hipFree(dev_M11EW); hipFree(dev_q_xiN); hipFree(dev_q_etN); hipFree(dev_q_xiS); hipFree(dev_q_etS); hipFree(dev_q_xiFN); hipFree(dev_q_etFN); hipFree(dev_q_xiFS); hipFree(dev_q_etFS); hipFree(dev_NpressFN); hipFree(dev_NpressFS); hipFree(dev_M22SN); hipFree(dev_apE); hipFree(dev_apW); hipFree(dev_apFE); hipFree(dev_apFW); hipFree(dev_apN); hipFree(dev_apS); hipFree(dev_apFN); hipFree(dev_apFS); hipFree(dev_em_x); hipFree(dev_em_y); hipFree(dev_em_Fx); hipFree(dev_em_Fy); hipFree(dev_FpE); hipFree(dev_FpW); hipFree(dev_GpN); hipFree(dev_GpS); hipFree(dev_czw1x); hipFree(dev_czw2x); hipFree(dev_czwF1x); hipFree(dev_czwF2x); hipFree(dev_czw1y); hipFree(dev_czw2y); hipFree(dev_czwF1y); hipFree(dev_czwF2y); hipFree(dev_em_valS); hipFree(dev_em_valF); hipFree(dev_Val); hipFree(dev_dudxE); hipFree(dev_dvdxE); hipFree(dev_dudyE); hipFree(dev_dvdyE); hipFree(dev_dudxN); hipFree(dev_dvdxN); hipFree(dev_dudyN); hipFree(dev_dvdyN); hipFree(dev_duxidxix); hipFree(dev_dvetdxix); hipFree(dev_duxidetx); hipFree(dev_dvetdetx); hipFree(dev_duxidxiy); hipFree(dev_dvetdxiy); hipFree(dev_duxidety); hipFree(dev_dvetdety); hipFree(dev_vex); hipFree(dev_vey); hipFree(dev_vexF); hipFree(dev_veyF); hipFree(dev_w_wert); hipFree(dev_w_wertF); hipFree(dev_usw); hipFree(dev_vel); hipFree(dev_vexw); hipFree(dev_veyw); hipFree(dev_q_xi); hipFree(dev_q_et); hipFree(dev_q_xiF); hipFree(dev_q_etF); hipFree(dev_Ac); hipFree(dev_AcF); hipFree(dev_Npress1); hipFree(dev_Npress2); 
	hipFree(dev_NpressF);
	hipFree(dev_s);
	hipFree(dev_v);
	hipFree(dev_uone);
	hipFree(dev_utwo);
	hipFree(dev_usxnew); hipFree(dev_ufxnew);
	hipFree(dev_usxold); hipFree(dev_ufxold);
	hipFree(dev_usynew); hipFree(dev_ufynew);
	hipFree(dev_usyold); hipFree(dev_ufyold);
	hipFree(dev_utmp);
	hipFree(dev_inflow);
	hipFree(dev_loc);
	hipFree(dev_dire);
	hipFree(dev_waveSpeed);
	hipFree(dev_max);
	hipFree(dev_maxW);
	hipFree(dev_TotalTime);
	hipFree(dev_dt);
	hipFree(dev_dtval);
	// remaining north/south interface buffers from memoryMalloc()
	hipFree(dev_w_wertN); hipFree(dev_w_wertS);
	hipFree(dev_vexFN); hipFree(dev_vexFS);
	hipFree(dev_veyFN); hipFree(dev_veyFS);
}
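/*
 * Note on the CFL time-step reduction in kernelStep():
 * CFL2Kernel writes one wave-speed estimate per cell into dev_Val.
 * Its maximum is then found in two passes of reduceKernel:
 * reduceKernel<<<256,256>>> leaves one partial maximum per block in
 * dev_max (allocated with exactly 256 slots in memoryMalloc), and
 * reduceKernel<<<1,256>>> collapses those partials into the single
 * value dev_maxW that CFL3Kernel uses to set the time-step candidate
 * dev_dtval.
 *
 * reduceKernel itself is defined elsewhere in this project; the
 * kernel below is only a sketch of the block-wise maximum reduction
 * that this two-pass launch pattern assumes (one output per block,
 * power-of-two block size, non-negative inputs). It is illustrative,
 * not the project's implementation, and compiles unchanged under
 * both HIP and CUDA.
 */
__global__ void maxReduceSketch(const double *in, double *out, int n)
{
	__shared__ double cache[256];             // one slot per thread (blockDim.x == 256)

	const int tid    = threadIdx.x;
	const int stride = blockDim.x * gridDim.x;

	// Grid-stride loop: each thread folds several cells into one value.
	double m = 0.0;                           // wave speeds are non-negative here
	for (int i = blockIdx.x * blockDim.x + tid; i < n; i += stride)
		m = (in[i] > m) ? in[i] : m;

	cache[tid] = m;
	__syncthreads();

	// Tree reduction in shared memory down to one maximum per block.
	for (int s = blockDim.x / 2; s > 0; s >>= 1) {
		if (tid < s && cache[tid + s] > cache[tid])
			cache[tid] = cache[tid + s];
		__syncthreads();
	}

	if (tid == 0)
		out[blockIdx.x] = cache[0];           // partial (pass 1) or final (pass 2) maximum
}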
414a541c55082fcbd47e4bdf2addfd0812cb634c.cu
#include "adNOC_2Pb_runKernels.cuh" #include <assert.h> RunKernels::RunKernels(char *fname) { readinParamFile(fname); dataInitialization(); } void RunKernels::readinParamFile(char *fname) { FILE* FID = fopen(fname, "r"); if(!FID) { printf("Parameter file %s is missing. Abort...\n", fname); exit(1); } char line[1024]; readParamLine(FID, line, 1024); sscanf(line, "%lf", &TotalSim); readParamLine(FID, line, 1024); sscanf(line, "%lf", &EachOut); readParamLine(FID, line, 1024); sscanf(line, "%lf", &delta0); readParamLine(FID, line, 1024); sscanf(line, "%lf", &Cd); readParamLine(FID, line, 1024); sscanf(line, "%lf", &N_R); readParamLine(FID, line, 1024); sscanf(line, "%lf", &varTheta); readParamLine(FID, line, 1024); sscanf(line, "%lf", &phiS0); readParamLine(FID, line, 1024); sscanf(line, "%s", DEMData); readParamLine(FID, line, 1024); sscanf(line, "%s", IniHData); readParamLine(FID, line, 1024); sscanf(line, "%s", IniUData); readParamLine(FID, line, 1024); sscanf(line, "%s", IniVData); readParamLine(FID, line, 1024); sscanf(line, "%s", locData); } void RunKernels::readParamLine(FILE *fid, char *line, int len) { char *c; fgets(line, len, fid); // Remove the things after '#', or end of line ('\n') for(c = line; *c; c++) { int br = 0; switch(*c) { case '#': case '\n': *c = 0; br = 1; break; default: break; } if(br) break; } return; } void RunKernels::split(const string& s, vector<string>& sv, const char* delim) { sv.clear(); char* buffer = new char[s.size() + 1]; buffer[s.size()] = '\0'; copy(s.begin(), s.end(), buffer); char* p = std::strtok(buffer, delim); do { sv.push_back(p); } while ((p = strtok(NULL, delim))); delete[] buffer; return; } void RunKernels::dataInitialization() { fprintf(stdout,"\n\t***********************************************************************************\n"); fprintf(stdout,"\t\t* * * * * MoSES_2PDF: inflow (debris flow) mode * * * * * \n"); fprintf(stdout,"\t-----------------------------------------------------------------------------------\n"); fprintf(stdout,"\t\t2D Central Scheme (adNOC) Mixture Code in CUDA: inflow mode\n"); fprintf(stdout,"\t\tAuthor : Chi-Jyun Ko, Po-Chih Chen, Hock-Kiet Wong and Yih-Chin Tai\n"); fprintf(stdout,"\t\tLab for Computer Simulation and Visualization (CSV Lab), NCKU, Taiwan\n"); fprintf(stdout,"\t***********************************************************************************\n\n"); ifstream inputFileTopo; ifstream inputFileInitH; ifstream inputFileInitU; ifstream inputFileInitV; ifstream inputFileloc; inputFileTopo.open(DEMData, ios::in); inputFileInitH.open(IniHData, ios::in); inputFileInitU.open(IniUData, ios::in); inputFileInitV.open(IniVData, ios::in); inputFileloc.open(locData, ios::in); fprintf(stdout,"\t\tinput Topo File : %s\n", DEMData); fprintf(stdout,"\t\tinput Initial File (H) : %s\n", IniHData); fprintf(stdout,"\t\tinput Initial File (U) : %s\n", IniUData); fprintf(stdout,"\t\tinput Initial File (V) : %s\n\n", IniVData); // read Topo data if (inputFileTopo.fail() ){ printf("\n\t----------------------------------\n"); printf("\t Error can't open Topo file.\n"); printf("\t----------------------------------\n"); assert(0); } // read init data if (inputFileInitH.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Initial depth file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } if (inputFileInitU.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Initial velocity(U) file.\n"); 
printf("\t-------------------------------------------------\n"); assert(0); } if (inputFileInitV.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Initial velocity(V) file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } if (inputFileloc.fail() ){ printf("\n\t-------------------------------------------------\n"); printf("\t Error can't open Inflow location file.\n"); printf("\t-------------------------------------------------\n"); assert(0); } while(getline(inputFileTopo,TopoS)){ inputTopoTmp.push_back(TopoS); } getline(inputFileInitH, IniHS); while(getline(inputFileInitH,IniHS)){ inputIniHTmp.push_back(IniHS); } getline(inputFileInitU, IniUS); while(getline(inputFileInitU,IniUS)){ inputIniUTmp.push_back(IniUS); } getline(inputFileInitV, IniVS); while(getline(inputFileInitV,IniVS)){ inputIniVTmp.push_back(IniVS); } getline(inputFileloc, locS); while(getline(inputFileloc,locS)){ inputlocTmp.push_back(locS); } // read Topo file for(int i=0;i<6;i++){ split(inputTopoTmp[i], Topodata, " "); if(i==0){ NX = stoi(Topodata[1]); } else if(i==1){ NY = stoi(Topodata[1]); } else if(i==2){ xllcorner = stof(Topodata[1]); }else if(i==3){ yllcorner = stof(Topodata[1]); } else if(i==4){ dx = stof(Topodata[1]); dy = stof(Topodata[1]); } else if(i==5){ if(Topodata[0] == "NODATA_value"){ StartTopo = 0; } else{ StartTopo = 1; } } } Iniflowlen = inputIniHTmp.size(); locflowlen = inputlocTmp.size(); nx = NX; ny = NY; dx = dx*10; dy = dy*10; MINX = 0.0; MINY = 0.0; MAXX = dx*(nx-1); MAXY = dy*(ny-1); nxd = nx + 2*MD; nyd = ny + 2*MD; nxyd = max(nxd,nyd); arraySize = nxd * nyd; fprintf(stdout,"\t\tData points : %d,%d\n", nx, ny); fprintf(stdout,"\t\tDomain [dm] : (%5.3f,%5.3f)(%5.3f,%5.3f)\n", MINX, MAXX, MINY, MAXY); fprintf(stdout,"\t\tGrid size : %6.2f,%6.2f (%d,%d)\n",dx,dy,nx,ny); fprintf(stdout,"\t\tCFL number : %5.3f\n", CFL); fprintf(stdout,"\t\tdelta0 : %5.3f\n", delta0); fprintf(stdout,"\t\tCd : %5.3f\n", Cd); fprintf(stdout,"\t\tN_R : %5.3f\n", N_R); fprintf(stdout,"\t\tvarTheta : %5.3f\n", varTheta); fprintf(stdout,"\t\tinitial value of solid volume fraction : %5.3f\n\n", phiS0); fprintf(stdout,"\t\tTotal simulation time (sec) : %5.3f\n", TotalSim); fprintf(stdout,"\t\tEach output time (sec) : %5.3f\n\n", EachOut); NEW_MATRIX(topo, double, nxd, nyd); NEW_MATRIX(depth, double, nxd, nyd); NEW_MATRIX(inputLoc, double, 3, locflowlen); NEW_3DMATRIX(inputFlow, double, locflowlen, Iniflowlen, 3); NEW_ARRAY(inflowTime, double, Iniflowlen); NEW_MATRIX(dire, int, 3, locflowlen); // input Topo to matrix if(StartTopo==1){ for(int j=0; j<NY; j++) { split(inputTopoTmp[j+5], Topodata, " "); for(int i=0; i<NX; i++) { topo[i+MD][j+MD] = stof(Topodata[i]); topo[i+MD][j+MD] = 10.0*topo[i+MD][j+MD]; if(topo[i+MD][j+MD]<0){ topo[i+MD][j+MD] = 0; } } } } else{ for(int j=0; j<NY; j++) { split(inputTopoTmp[j+6], Topodata, " "); for(int i=0; i<NX; i++) { topo[i+MD][j+MD] = stof(Topodata[i]); topo[i+MD][j+MD] = 10.0*topo[i+MD][j+MD]; if(topo[i+MD][j+MD]<0){ topo[i+MD][j+MD] = 0; } } } } // input Initial flow inflowSize = Iniflowlen*locflowlen; for(int j=0; j<Iniflowlen; j++) { split(inputIniHTmp[j], IniHdata, "\t"); split(inputIniUTmp[j], IniUdata, "\t"); split(inputIniVTmp[j], IniVdata, "\t"); inflowTime[j] = stof(IniHdata[0])*10; for(int i=0; i<locflowlen; i++) { inputFlow[i][j][0] = 10.0*stof(IniHdata[i+1]); inputFlow[i][j][1] = stof(IniUdata[i+1]); inputFlow[i][j][2] = stof(IniVdata[i+1]); } } // input Initial flow location for(int j=0; 
j<locflowlen; j++) { split(inputlocTmp[j], locdata, "\t"); // cout <<stof(IniHdata[0]) <<" "; for(int i=0; i<3; i++) { inputLoc[i][j] = stof(locdata[i]); // cout<<inputLoc[i][j] <<" "; } // cout <<"\n"; } // B.C. initioal condition for(int i=0; i<MD; i++) { for(int j=MD; j<(ny+MD); j++) { topo[i ][j] = topo[MD ][j]; topo[nx+MD+i][j] = topo[nx+MD-1][j]; } } for(int j=0; j<MD; j++) { for(int i=0; i<(nx+2*MD); i++) { topo[i][j ] = topo[i][MD ]; topo[i][ny+MD+j] = topo[i][ny+MD-1]; } } } clock_t RunKernels::run() { cudaError_t errhost ; errhost = cudaMallocHost((void **)&TotalStep_h, sizeof(double)); errhost = cudaMallocHost((void **)&dt_h, sizeof(double)); errhost = cudaMallocHost((void **)&depth_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&topo_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&speed_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultHs_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultHf_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultUs_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultVs_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultUf_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultVf_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&resultphi_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&bfkt_h, sizeof(double) * arraySize * 3); errhost = cudaMallocHost((void **)&svec_h, sizeof(double) * arraySize * 2); errhost = cudaMallocHost((void **)&cvalue_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&result_h, sizeof(double) * arraySize); errhost = cudaMallocHost((void **)&inflow_h, sizeof(double) * inflowSize * 3); errhost = cudaMallocHost((void **)&loc_h, sizeof(double) * locflowlen * 3); errhost = cudaMallocHost((void **)&dire_h, sizeof(int) * locflowlen * 3); if(errhost != cudaSuccess){ printf("\nCould not allocate Host memory : %d\n",errhost); } for(int i = 0; i < nxd; i++){ for(int j = 0; j < nyd; j++){ topo_h[j*nxd+i] = topo[i][j]; // depth_h[j*nxd+i] = depth[i][j]; } } for(int m=0;m<3;m++){ for(int j = 0; j < Iniflowlen; j++){ for(int i = 0; i < locflowlen; i++){ inflow_h[m * Iniflowlen * locflowlen + j*locflowlen + i] = inputFlow[i][j][m]; } } } for(int j=0;j<locflowlen;j++){ for(int i=0;i<3;i++){ if((inputLoc[2][j] == 0) || (inputLoc[2][j] == NY)){ dire[0][j] = inputLoc[0][j]; dire[1][j] = -5; //dire[0] direction in X dire[2][j] = 0; //dire[1] direction in Y }else{ dire[0][j] = inputLoc[0][j]; dire[1][j] = 0; dire[2][j] = -5; } // cout<<dire[i][j]<<" "; } // cout<<endl; } for(int i = 0; i < 3; i++){ for(int j = 0; j < locflowlen; j++){ loc_h[j*3+i] = inputLoc[i][j]; dire_h[j*3+i] = dire[i][j]; } } clock_t start, end; start = clock(); //cuda start memoryMalloc(); kernelStep(); freeMemory(); end = clock(); //cuda stop // outputFile(); return end - start; } void RunKernels::kernelStep() { cudaError_t errMem, errCpy ; errMem = cudaMemset(dev_TotalTime, 0.0, sizeof(double)); errMem = cudaMemset(dev_dt, 0.0, sizeof(double)); errMem = cudaMemset(dev_dtval, 0.0, sizeof(double)); errMem = cudaMemset(dt_h, 0.0, sizeof(double)); if(errMem != cudaSuccess){ printf("\nError cuda Memory set : %d\n",errMem); } errCpy = cudaMemcpy(dev_topo, topo_h, sizeof(double) * arraySize, cudaMemcpyHostToDevice); errCpy = cudaMemcpy(dev_inflow, inflow_h, sizeof(double) * (inflowSize * 3), cudaMemcpyHostToDevice); errCpy = cudaMemcpy(dev_loc, loc_h, sizeof(double) 
* (locflowlen * 3), cudaMemcpyHostToDevice); errCpy = cudaMemcpy(dev_dire, dire_h, sizeof(int) * (locflowlen * 3), cudaMemcpyHostToDevice); int bx = (nxd + BLOCK_SIZE - 1) / BLOCK_SIZE; int by = (nyd + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 blocksPerGrid(bx, by); dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); double Htmp, hstmp, hftmp, ustmp, uftmp, vstmp, vftmp, phitmp; int outputStep = TotalSim/EachOut; double tf[outputStep+1] = {0.0}; for(int ii=1;ii<=outputStep;ii++){ tf[ii] = ii*EachOut*10; } double outtime[outputStep+1]={0}; int iter = 1, nt, io; int nstop = 0, schreiben =0; // int Totalnt = 0; int outsteplen = sizeof(tf)/sizeof(tf)[0]; int inflowCount =0; makeTopo1Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_bfkt, MINX, MINY, dx, dy, nxd, nyd); cudaDeviceSynchronize(); makeTopo2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_topo, dev_bfkt, dx, dy, nxd, nyd, nx, ny); cudaDeviceSynchronize(); makeTopo3Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_bfkt, dev_posx, dev_posy, nxd, nyd); cudaDeviceSynchronize(); makeTopo4Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_bfkt, dev_posx, dev_posy, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_dbdx, dev_dbdy, nxd, nyd); cudaDeviceSynchronize(); makeTopo5Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dbdx, dev_dbdy, dev_cvalue, nxd, nyd); cudaDeviceSynchronize(); makeTopo6Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dbdx, dev_dbdy, dev_cvalue, dev_svec, nxd, nyd); cudaDeviceSynchronize(); makeTopo7Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dbdx, dev_dbdy, dev_cvalue, dev_svec, dev_Jacb31, dev_Jacb32, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_dettmp, nxd, nyd); cudaDeviceSynchronize(); makeTopo8Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_cvalue, dev_svec, dev_Jacb31, dev_Jacb32, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_dettmp, dev_Detmin, dev_i_ddxi11, dev_i_ddxi12, dev_i_ddxi21, dev_i_ddxi22, nxd, nyd); cudaDeviceSynchronize(); makeTopo9Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_cvalue, dev_svec, dev_i_ddxi11, dev_i_ddxi12, dev_i_ddxi21, dev_i_ddxi22, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, nxd, nyd); cudaDeviceSynchronize(); makeTopo11Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_tande, delta0, nxd, nyd); cudaDeviceSynchronize(); Boundary1Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_cvalue, dev_Detmin, dev_svec, dev_Jacb31, dev_Jacb32, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, nxd, nyd, nx ,ny); cudaDeviceSynchronize(); Boundary2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dxdxi11, dev_dxdxi12, dev_dxdxi21, dev_dxdxi22, dev_cvalue, dev_Detmin, dev_svec, dev_Jacb31, dev_Jacb32, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, nxd, nyd, nx ,ny); cudaDeviceSynchronize(); JacobKernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_svec, dev_cvalue, dev_posx, dev_posy, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, nxd, nyd); cudaDeviceSynchronize(); Boundary3Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, nxd, nyd, nx ,ny); cudaDeviceSynchronize(); errCpy = 
cudaMemcpy(bfkt_h, &dev_bfkt[0], sizeof(double)* arraySize * 3, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(svec_h, &dev_svec[0], sizeof(double)* arraySize * 2, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(cvalue_h, &dev_cvalue[0], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); FILE *fpTopo; if ((fpTopo=fopen("./result2Pb/DEM.dat", "w")) == NULL) { printf("\n\t---------------------------------------------------------\n"); printf("\t Error can't open \"result2Pb\" folder.\n"); printf("\t Need build the result directory --> mkdir result2Pb\n"); printf("\t---------------------------------------------------------\n"); fclose(fpTopo); exit(0); } fprintf(fpTopo, "VARIABLES = \"x\", \"y\", \"z\", \"c\", \"S1\", \"S2\"\n "); for (int i=MD;i<nxd-MD;i++) { for (int j=MD;j<nyd-MD;j++) { fprintf(fpTopo, "%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\n",bfkt_h[0 * nxd * nyd + j * nxd + i]*0.1, bfkt_h[1 * nxd * nyd + j * nxd + i]*0.1, bfkt_h[2 * nxd * nyd + j * nxd + i]*0.1, cvalue_h[j * nxd + i],svec_h[0 * nxd * nyd + j * nxd + i],svec_h[1 * nxd * nyd + j * nxd + i]); } } fclose(fpTopo); FILE *fpInit; if ((fpInit=fopen("./result2Pb/001.dat", "w")) == NULL) { printf("\n\t---------------------------------------------------------\n"); printf("\t Error can't open \"result2Pb\" folder.\n"); printf("\t---------------------------------------------------------\n"); fclose(fpInit); exit(0); } fprintf(fpInit, "VARIABLES = \"H\", \"phi\", \"Us\", \"Uf\", \"Vs\", \"Vf\"\n "); for (int i=MD;i<nxd-MD;i++) { for (int j=MD;j<nyd-MD;j++) { fprintf(fpInit, "%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\n",0.1*depth[i][j],phiS0, 0.0, 0.0, 0.0, 0.0); } } fclose(fpInit); FILE *fpInfo; if ((fpInfo=fopen("./result2Pb/Info.dat", "w")) == NULL) { printf("\n\t---------------------------------------------------------\n"); printf("\t Error can't open \"result2Pb\" folder.\n"); printf("\t---------------------------------------------------------\n"); fclose(fpInfo); exit(0); } fprintf(fpInfo, "VARIABLES = \"x-point\", \"y-point\", \"dx\", \"dy\", \"xllcorner\", \"yllcorner\", \"TotalStep\"\n "); fprintf(fpInfo, "\t%d\t\t %d\t %10.2f\t %10.2f\t %10.4f\t %10.4f\t %d", NX, NY, (dx*0.1), (dy*0.1), xllcorner, yllcorner,(outputStep+1)); fclose(fpInfo); cudaFree(dev_i_ddxi11); cudaFree(dev_i_ddxi12); cudaFree(dev_i_ddxi21); cudaFree(dev_i_ddxi22); cudaFree(dev_dettmp); cudaFree(dev_bfkt); MeanKernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dxdxi11, dev_dxdxi21, dev_dxdxi12, dev_dxdxi22, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, dev_invJ31, dev_invJ32, dev_invJ33, dev_Detmin, dev_cvalue, dev_svec, dev_dxdxi11_avgEW, dev_dxdxi21_avgEW, dev_dxdxi12_avgSN, dev_dxdxi22_avgSN, dev_J13dxi_avgEW, dev_J23dxi_avgEW, dev_J33dxi_avgEW, dev_J13det_avgEW, dev_J23det_avgEW, dev_J33det_avgEW, dev_J13dxi_avgSN, dev_J23dxi_avgSN, dev_J33dxi_avgSN, dev_J13det_avgSN, dev_J23det_avgSN, dev_J33det_avgSN, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_invJ31_avgEW, dev_invJ32_avgEW, dev_invJ33_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_invJ31_avgSN, dev_invJ32_avgSN, dev_invJ33_avgSN, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_cval_avgEW, dev_cval_avgSN, dev_svec_avgEW, dev_svec_avgSN, nxd, nyd); cudaDeviceSynchronize(); for (nt = 1; (!nstop) && (nt<100000); nt++){ Inflow1Kernel <<<blocksPerGrid, 
threadsPerBlock>>>( dev_result, dev_inflow, dev_loc, dev_u, dev_cvalue, phiS0, locflowlen, Iniflowlen, inflowCount, dev_dire, nxd, nyd); cudaDeviceSynchronize(); Inflow2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_inflow, dev_loc, dev_u, locflowlen, Iniflowlen, inflowCount, dev_dire, nxd, nyd); cudaDeviceSynchronize(); for (io=0; io<2; io++){ if(io == 0){ UzeroKernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_uzero, nxd, nyd); cudaDeviceSynchronize(); } cudaDeviceSynchronize(); Boundary5Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, nxd, nyd, nx ,ny); cudaDeviceSynchronize(); Boundary6Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, nxd, nyd, nx ,ny); cudaDeviceSynchronize(); Boundary7Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, nxd, nyd, nx ,ny); cudaDeviceSynchronize(); Boundary9Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_Hpx, dev_Hpy, dev_Ppx, dev_Ppy, dev_PDx, dev_PDy, dev_ux, dev_uy, dev_apEW, dev_apSN, dev_apFEW, dev_apFSN, nxd, nyd); cudaDeviceSynchronize(); TVD1Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_dux, dev_duy, nxd, nyd); cudaDeviceSynchronize(); TVD2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dux, dev_duy, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, nxd, nyd); cudaDeviceSynchronize(); TVD3Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_dux, dev_duy, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, dev_t1x, dev_t2x, dev_t1y, dev_t2y, nxd, nyd); cudaDeviceSynchronize(); TVD4Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_t1x, dev_t2x, dev_t1y, dev_t2y, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, nxd, nyd); cudaDeviceSynchronize(); TVD5Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_t1x, dev_t2x, dev_t1y, dev_t2y, dev_sgnAx, dev_sgnBx, dev_sgnAy, dev_sgnBy, dev_ux, dev_uy, nxd, nyd); cudaDeviceSynchronize(); InterfacesKernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_ux, dev_uy, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd); cudaDeviceSynchronize(); Interfaces2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd,nx ,ny); cudaDeviceSynchronize(); KeepPositivi1Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd); cudaDeviceSynchronize(); KeepPositivi2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, nxd, nyd); cudaDeviceSynchronize(); Flux1Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_vexE, dev_veyE, dev_vexW, dev_veyW, dev_vexFE, dev_veyFE, dev_vexFW, dev_veyFW, dev_vexN, dev_veyN, dev_vexS, dev_veyS, dev_vexFN, dev_veyFN, dev_vexFS, dev_veyFS, nxd, nyd); cudaDeviceSynchronize(); Flux2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_vexE, dev_veyE, dev_vexW, dev_veyW, dev_vexFE, dev_veyFE, dev_vexFW, dev_veyFW, dev_vexN, dev_veyN, dev_vexS, dev_veyS, dev_vexFN, dev_veyFN, dev_vexFS, dev_veyFS, dev_w_wertE, dev_w_wertW, dev_w_wertFE, dev_w_wertFW, dev_w_wertN, dev_w_wertS, dev_w_wertFN, dev_w_wertFS, dev_svec, dev_cvalue, nxd, nyd); cudaDeviceSynchronize(); Flux3Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_vexE, dev_veyE, dev_vexW, dev_veyW, dev_vexFE, dev_veyFE, dev_vexFW, dev_veyFW, dev_vexN, dev_veyN, dev_vexS, dev_veyS, dev_vexFN, dev_veyFN, dev_vexFS, dev_veyFS, dev_w_wertE, dev_w_wertW, dev_w_wertFE, dev_w_wertFW, 
dev_w_wertN, dev_w_wertS, dev_w_wertFN, dev_w_wertFS, dev_q_xiE , dev_q_etE, dev_q_xiW , dev_q_etW, dev_q_xiFE, dev_q_etFE, dev_q_xiFW, dev_q_etFW, dev_NpressFE, dev_NpressFW, dev_M11EW, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_cval_avgEW, dev_q_xiN , dev_q_etN, dev_q_xiS , dev_q_etS, dev_q_xiFN, dev_q_etFN, dev_q_xiFS, dev_q_etFS, dev_NpressFN, dev_NpressFS, dev_M22SN, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_cval_avgSN, nxd, nyd); cudaDeviceSynchronize(); Flux4Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_uE, dev_uW, dev_uN, dev_uS, dev_q_xiE , dev_q_xiW, dev_q_xiFE, dev_q_xiFW, dev_NpressFE, dev_NpressFW, dev_invJ11_avgEW, dev_apE, dev_apW, dev_apFE, dev_apFW, dev_q_etN , dev_q_etS, dev_q_etFN, dev_q_etFS, dev_NpressFN, dev_NpressFS, dev_invJ22_avgSN, dev_apN, dev_apS, dev_apFN, dev_apFS, nxd, nyd); cudaDeviceSynchronize(); Flux5Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_apE, dev_apW, dev_apFE, dev_apFW, dev_apEW , dev_apFEW, dev_apN, dev_apS, dev_apFN, dev_apFS, dev_apSN, dev_apFSN, nxd, nyd); cudaDeviceSynchronize(); Flux6Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_apEW, dev_apFEW, dev_apSN, dev_apFSN, dev_em_x , dev_em_y, dev_em_Fx, dev_em_Fy, dev_czw1x, dev_czw2x, dev_czwF1x,dev_czwF2x, dev_czw1y, dev_czw2y, dev_czwF1y,dev_czwF2y, dev_uE, dev_uW, dev_uN, dev_uS, dev_cval_avgEW, dev_cval_avgSN, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_M11EW, dev_M22SN, nxd, nyd); cudaDeviceSynchronize(); Flux7Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_FpE, dev_FpW, dev_GpN, dev_GpS, dev_czw1x, dev_czw2x, dev_czwF1x,dev_czwF2x, dev_czw1y, dev_czw2y, dev_czwF1y,dev_czwF2y, dev_uE, dev_uW, dev_uN, dev_uS, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_q_xiE, dev_q_xiFE, dev_q_xiW, dev_q_xiFW, dev_q_etN, dev_q_etFN, dev_q_etS, dev_q_etFS, dev_dxdxi11_avgEW, dev_dxdxi21_avgEW, dev_dxdxi12_avgSN, dev_dxdxi22_avgSN, nxd, nyd); cudaDeviceSynchronize(); CFL1Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_em_x, dev_em_y, dev_em_Fx, dev_em_Fy, dev_em_valS, dev_em_valF, dx, dy, nxd, nyd); cudaDeviceSynchronize(); CFL2Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_em_valS, dev_em_valF, dev_Val, nxd, nyd); cudaDeviceSynchronize(); if(io==0){ int threads = 256; int blocks = 256;//min((arraySize + threads - 1) / threads, 256); reduceKernel <<<blocks, threads>>> (dev_Val, dev_max, arraySize); cudaDeviceSynchronize(); reduceKernel <<<1, blocks>>> (dev_max, dev_maxW, blocks); cudaDeviceSynchronize(); CFL3Kernel <<<1, threadsPerBlock>>>( dev_result, dev_dtval, dev_maxW, dev_TotalTime); cudaDeviceSynchronize(); errCpy = cudaMemcpy(dt_h, dev_dtval, sizeof(double)* 1, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(TotalStep_h, dev_TotalTime, sizeof(double)* 1, cudaMemcpyDeviceToHost); if((*TotalStep_h + *dt_h) >= tf[iter]){ *dt_h = tf[iter] - *TotalStep_h; schreiben = 1; errCpy = cudaMemcpy(dev_dtval, dt_h, sizeof(double)* 1, cudaMemcpyHostToDevice); iter++; } if(*TotalStep_h >= inflowTime[inflowCount] && inflowCount < (Iniflowlen-1) ){ inflowCount++; } cudaDeviceSynchronize(); fprintf(stdout, "\r\t\tSimulation progress: %3.0f %%", (*TotalStep_h*0.1/TotalSim)*100); fflush(stdout); } CFL4Kernel <<<1, threadsPerBlock>>>( dev_result, dev_dt, dev_dtval,dev_TotalTime,io); cudaDeviceSynchronize(); Flux8Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_Hpx, dev_Hpy, dev_Ppx, dev_Ppy, dev_FpE, 
dev_FpW, dev_GpN, dev_GpS, dev_apEW, dev_apFEW, dev_apSN, dev_apFSN, dev_uE, dev_uW, dev_uN, dev_uS, dev_u, dev_ux, dev_uy, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_cval_avgEW, dev_cval_avgSN, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_invJ31_avgEW, dev_invJ32_avgEW, dev_invJ33_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_invJ31_avgSN, dev_invJ32_avgSN, dev_invJ33_avgSN, dev_dudxE, dev_dvdxE, dev_dudyE, dev_dvdyE, dev_dudxN, dev_dvdxN, dev_dudyN, dev_dvdyN, dx, dy, nxd, nyd); cudaDeviceSynchronize(); Flux9Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_duxidxix, dev_dvetdxix, dev_duxidetx, dev_dvetdetx, dev_duxidxiy, dev_dvetdxiy, dev_duxidety, dev_dvetdety, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ13_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ23_avgEW, dev_invJ31_avgEW, dev_invJ32_avgEW, dev_invJ33_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ13_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, dev_invJ23_avgSN, dev_invJ31_avgSN, dev_invJ32_avgSN, dev_invJ33_avgSN, dev_dudxE, dev_dvdxE, dev_dudyE, dev_dvdyE, dev_dudxN, dev_dvdxN, dev_dudyN, dev_dvdyN, nxd, nyd); cudaDeviceSynchronize(); Flux10Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_PDx, dev_PDy, dev_uE, dev_uW, dev_uN, dev_uS, dev_Detmin_avgEW, dev_Detmin_avgSN, dev_duxidxix, dev_dvetdxix, dev_duxidetx, dev_dvetdetx, dev_duxidxiy, dev_dvetdxiy, dev_duxidety, dev_dvetdety, dev_invJ11_avgEW, dev_invJ12_avgEW, dev_invJ21_avgEW, dev_invJ22_avgEW, dev_invJ11_avgSN, dev_invJ12_avgSN, dev_invJ21_avgSN, dev_invJ22_avgSN, nxd, nyd); cudaDeviceSynchronize(); Flux11Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_vex, dev_vey, dev_vexF, dev_veyF, nxd, nyd); cudaDeviceSynchronize(); Flux12Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_vexF, dev_veyF, dev_svec, dev_cvalue, nxd, nyd); cudaDeviceSynchronize(); Flux13Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_usw, dev_vel, nxd, nyd); cudaDeviceSynchronize(); Flux14Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_vexF, dev_veyF, dev_vexw, dev_veyw, dev_usw, dev_vel, dev_q_xi, dev_q_et, dev_q_xiF, dev_q_etF, dev_invJ11, dev_invJ12, dev_invJ13, dev_invJ21, dev_invJ22, dev_invJ23, nxd, nyd); cudaDeviceSynchronize(); Flux15Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_w_wert, dev_w_wertF, dev_vex, dev_vey, dev_vexF, dev_veyF, dev_q_xi, dev_q_et, dev_q_xiF, dev_q_etF, dev_J13dxi, dev_J23dxi, dev_J33dxi, dev_J13det, dev_J23det, dev_J33det, dev_Ac, dev_AcF, nxd, nyd); cudaDeviceSynchronize(); Flux16Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_Npress1, dev_Npress2, dev_NpressF, dev_Ac, dev_AcF, dev_Detmin, dev_cvalue, dev_u, nxd, nyd); cudaDeviceSynchronize(); Flux17Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_Npress1, dev_Npress2, dev_NpressF, dev_Ac, dev_AcF, dev_Detmin, dev_svec, dev_vex, dev_vey, dev_vexw, dev_veyw, dev_vexF, dev_veyF, dev_tande, dev_u, dev_s, Cd, N_R, varTheta, nxd, nyd); cudaDeviceSynchronize(); Flux18Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_v, dev_Detmin, dev_Hpx, dev_Hpy, dev_Ppx, dev_Ppy, dev_PDx, dev_PDy, dev_s, dev_u, dev_uzero, dev_dt, dx, dy, N_R, nxd, nyd); cudaDeviceSynchronize(); Flux19Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, 
dev_v, dev_Detmin, dev_u, nxd, nyd); cudaDeviceSynchronize(); Flux20Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_utmp, nxd, nyd); cudaDeviceSynchronize(); Flux21Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, nxd, nyd); cudaDeviceSynchronize(); if(io == 0) { Flux22Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_uone, nxd, nyd); cudaDeviceSynchronize(); Flux23Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_uzero, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); cudaDeviceSynchronize(); Flux24Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_uone, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); cudaDeviceSynchronize(); } else{ Flux25Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_utwo, nxd, nyd); cudaDeviceSynchronize(); Flux26Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_uone, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); cudaDeviceSynchronize(); Flux27Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_utwo, dev_usxnew, dev_ufxnew, dev_usxold, dev_ufxold, dev_usynew, dev_ufynew, dev_usyold, dev_ufyold, nxd, nyd); cudaDeviceSynchronize(); Flux28Kernel <<<blocksPerGrid, threadsPerBlock>>>( dev_result, dev_u, dev_utwo, nxd, nyd); cudaDeviceSynchronize(); } if(io) { if(schreiben==1){ errCpy = cudaMemcpy(resultHs_h, &dev_u[0 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(resultHf_h, &dev_u[3 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(resultUs_h, &dev_u[1 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(resultVs_h, &dev_u[2 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(resultUf_h, &dev_u[4 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(resultVf_h, &dev_u[5 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); errCpy = cudaMemcpy(resultphi_h,&dev_u[6 * nyd * nxd], sizeof(double)* arraySize, cudaMemcpyDeviceToHost); char outfile_Web[100]; sprintf(outfile_Web,"./result2Pb/%03d.dat",iter); outtime[iter-1] = *TotalStep_h; FILE *fpTmp; fpTmp=fopen("./result2Pb/Time.dat", "w"); for (int nn=0;nn<(iter);nn++){ fprintf(fpTmp, "%20.4f", outtime[nn]); } fclose(fpTmp); FILE *fpout; fpout=fopen(outfile_Web, "w"); fprintf(fpout, "VARIABLES = \"H\", \"phi\", \"Us\", \"Uf\", \"Vs\", \"Vf\"\n "); for (int i=MD;i<nxd-MD;i++) { for (int j=MD;j<nyd-MD;j++) { Htmp = 0.1*(resultHs_h[j * nxd + i] + resultHf_h[j * nxd + i]); hstmp = resultHs_h[j * nxd + i]; hftmp = resultHf_h[j * nxd + i]; if (hstmp > 0.00001){ vstmp = resultVs_h[j * nxd + i]/resultHs_h[j * nxd + i]; ustmp = resultUs_h[j * nxd + i]/resultHs_h[j * nxd + i]; phitmp = resultphi_h[j * nxd + i]; }else{ vstmp = 0.0; ustmp = 0.0; phitmp = 0.0; } if (hftmp > 0.00001){ vftmp = resultVf_h[j * nxd + i]/resultHf_h[j * nxd + i]; uftmp = resultUf_h[j * nxd + i]/resultHf_h[j * nxd + i]; }else{ vftmp = 0.0; uftmp = 0.0; } fprintf(fpout, "%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\t%10.4f\n",Htmp,phitmp,ustmp,uftmp,vstmp,vftmp); } } fclose(fpout); if(iter == outsteplen){ nstop = 1; // Totalnt = nt; } // } schreiben = 0; } } } cudaDeviceSynchronize(); } if (cudaPeekAtLastError() != cudaSuccess) { cout << 
cudaGetErrorString(cudaPeekAtLastError()) << endl; } errCpy = cudaMemcpy(TotalStep_h, dev_TotalTime, sizeof(double)* 1, cudaMemcpyDeviceToHost); cout << "\nTotal time : " << *TotalStep_h/10 << " sec "; fprintf(stdout, "\nTotal number of steps: %d\n", nt); if(errCpy != cudaSuccess){ printf("\nError cuda Memory copy : %d\n",errCpy); } } void RunKernels::memoryMalloc() { cudaError_t errdevice ; errdevice = cudaMalloc((void **)&dev_topo, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_depth, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_result, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_bfkt, sizeof(double) * arraySize * 3); errdevice = cudaMalloc((void **)&dev_posx, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_posy, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi11, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi12, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi21, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi22, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dbdx , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dbdy , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_cvalue, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_svec , sizeof(double) * arraySize * 2); errdevice = cudaMalloc((void **)&dev_Jacb31, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Jacb32, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dettmp, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Detmin, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_i_ddxi11, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_i_ddxi12, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_i_ddxi21, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_i_ddxi22, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ11, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ12, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ13, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ21, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ22, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ23, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ31, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ32, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ33, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_u , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uzero , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_Hpx , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_Hpy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_Ppx , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_Ppy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_PDx , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_PDy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_ux , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_dux , sizeof(double) * arraySize * 7); errdevice = 
cudaMalloc((void **)&dev_duy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_t1x , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_t2x , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_t1y , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_t2y , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_sgnAx , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_sgnBx , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_sgnAy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_sgnBy , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uE , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uW , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uN , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uS , sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_tande , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J13dxi , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J23dxi , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J33dxi , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J13det , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J23det , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J33det , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apFEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apFSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi11_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi21_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi12_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dxdxi22_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J13dxi_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J23dxi_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J33dxi_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J13det_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J23det_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J33det_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J13dxi_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J23dxi_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J33dxi_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J13det_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J23det_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_J33det_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ11_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ12_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ13_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ21_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ22_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ23_avgEW , 
sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ31_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ32_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ33_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ11_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ12_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ13_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ21_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ22_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ23_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ31_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ32_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_invJ33_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Detmin_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Detmin_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_cval_avgEW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_cval_avgSN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_svec_avgEW , sizeof(double) * arraySize * 2); errdevice = cudaMalloc((void **)&dev_svec_avgSN , sizeof(double) * arraySize * 2); errdevice = cudaMalloc((void **)&dev_vexE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etW , 
sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_NpressFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_NpressFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_M11EW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_NpressFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_NpressFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_M22SN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apFE , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apFW , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apFN , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_apFS , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_em_x , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_em_y , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_em_Fx, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_em_Fy, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_FpE, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_FpW, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_GpN, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_GpS, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_czw1x , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czw2x , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czwF1x, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czwF2x, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czw1y , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czw2y , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czwF1y, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_czwF2y, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_em_valS, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_em_valF, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Val , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dudxE, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvdxE, sizeof(double) * arraySize); errdevice = cudaMalloc((void 
**)&dev_dudyE, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvdyE, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dudxN, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvdxN, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dudyN, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvdyN, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_duxidxix, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvetdxix, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_duxidetx, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvetdetx, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_duxidxiy, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvetdxiy, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_duxidety, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_dvetdety, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vex , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vey , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexF , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyF , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wert , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_w_wertF, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_usw , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vel , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_vexw , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_veyw , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xi , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_et , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_xiF , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_q_etF , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Ac , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_AcF , sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Npress1, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_Npress2, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_NpressF, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_s, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_v, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_uone, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_utwo, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_usxnew, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_ufxnew, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_usxold, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_ufxold, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_usynew, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_ufynew, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_usyold, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_ufyold, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_utmp, sizeof(double) * arraySize * 7); errdevice = cudaMalloc((void **)&dev_waveSpeed, sizeof(double) * arraySize); errdevice = cudaMalloc((void **)&dev_max, sizeof(double) * 256); errdevice = cudaMalloc((void 
**)&dev_maxW, sizeof(double) * 1); errdevice = cudaMalloc((void **)&dev_TotalTime, sizeof(double) * 1); errdevice = cudaMalloc((void **)&dev_dt, sizeof(double) * 1); errdevice = cudaMalloc((void **)&dev_dtval, sizeof(double) * 1); errdevice = cudaMalloc((void **)&dev_inflow, sizeof(double) * (inflowSize * 3)); errdevice = cudaMalloc((void **)&dev_loc, sizeof(double) * (locflowlen * 3)); errdevice = cudaMalloc((void **)&dev_dire, sizeof(int) * (locflowlen * 3)); if(errdevice != cudaSuccess){ printf("\nCould not allocate Device memory : %d\n",errdevice); } } void RunKernels::freeMemory() { cudaFree(dev_topo); cudaFree(dev_depth); cudaFree(dev_result); // cudaFree(dev_bfkt); cudaFree(dev_posx); cudaFree(dev_posy); cudaFree(dev_dxdxi11); cudaFree(dev_dxdxi12); cudaFree(dev_dxdxi21); cudaFree(dev_dxdxi22); cudaFree(dev_dbdx); cudaFree(dev_dbdy); cudaFree(dev_cvalue); cudaFree(dev_svec); cudaFree(dev_Jacb31); cudaFree(dev_Jacb32); // cudaFree(dev_dettmp); cudaFree(dev_Detmin); // cudaFree(dev_i_ddxi11); // cudaFree(dev_i_ddxi12); // cudaFree(dev_i_ddxi21); // cudaFree(dev_i_ddxi22); cudaFree(dev_invJ11); cudaFree(dev_invJ12); cudaFree(dev_invJ13); cudaFree(dev_invJ21); cudaFree(dev_invJ22); cudaFree(dev_invJ23); cudaFree(dev_invJ31); cudaFree(dev_invJ32); cudaFree(dev_invJ33); cudaFree(dev_u); cudaFree(dev_uzero); cudaFree(dev_tande); cudaFree(dev_J13dxi); cudaFree(dev_J23dxi); cudaFree(dev_J33dxi); cudaFree(dev_J13det); cudaFree(dev_J23det); cudaFree(dev_J33det); cudaFree(dev_Hpx); cudaFree(dev_Hpy); cudaFree(dev_Ppx); cudaFree(dev_Ppy); cudaFree(dev_PDx); cudaFree(dev_PDy); cudaFree(dev_ux); cudaFree(dev_uy); cudaFree(dev_apEW); cudaFree(dev_apSN); cudaFree(dev_apFEW); cudaFree(dev_apFSN); cudaFree(dev_dux); cudaFree(dev_duy); cudaFree(dev_t1x); cudaFree(dev_t2x); cudaFree(dev_t1y); cudaFree(dev_t2y); cudaFree(dev_sgnAx); cudaFree(dev_sgnBx); cudaFree(dev_sgnAy); cudaFree(dev_sgnBy); cudaFree(dev_dxdxi11_avgEW); cudaFree(dev_dxdxi21_avgEW); cudaFree(dev_dxdxi12_avgSN); cudaFree(dev_dxdxi22_avgSN); cudaFree(dev_J13dxi_avgEW); cudaFree(dev_J23dxi_avgEW); cudaFree(dev_J33dxi_avgEW); cudaFree(dev_J13det_avgEW); cudaFree(dev_J23det_avgEW); cudaFree(dev_J33det_avgEW); cudaFree(dev_J13dxi_avgSN); cudaFree(dev_J23dxi_avgSN); cudaFree(dev_J33dxi_avgSN); cudaFree(dev_J13det_avgSN); cudaFree(dev_J23det_avgSN); cudaFree(dev_J33det_avgSN); cudaFree(dev_invJ11_avgEW); cudaFree(dev_invJ12_avgEW); cudaFree(dev_invJ13_avgEW); cudaFree(dev_invJ21_avgEW); cudaFree(dev_invJ22_avgEW); cudaFree(dev_invJ23_avgEW); cudaFree(dev_invJ31_avgEW); cudaFree(dev_invJ32_avgEW); cudaFree(dev_invJ33_avgEW); cudaFree(dev_invJ11_avgSN); cudaFree(dev_invJ12_avgSN); cudaFree(dev_invJ13_avgSN); cudaFree(dev_invJ21_avgSN); cudaFree(dev_invJ22_avgSN); cudaFree(dev_invJ23_avgSN); cudaFree(dev_invJ31_avgSN); cudaFree(dev_invJ32_avgSN); cudaFree(dev_invJ33_avgSN); cudaFree(dev_Detmin_avgEW); cudaFree(dev_Detmin_avgSN); cudaFree(dev_cval_avgEW); cudaFree(dev_cval_avgSN); cudaFree(dev_svec_avgEW); cudaFree(dev_svec_avgSN); cudaFree(dev_uE); cudaFree(dev_uW); cudaFree(dev_uN); cudaFree(dev_uS); cudaFree(dev_vexE); cudaFree(dev_vexW); cudaFree(dev_veyE); cudaFree(dev_veyW); cudaFree(dev_w_wertE); cudaFree(dev_w_wertW); cudaFree(dev_vexFE); cudaFree(dev_vexFW); cudaFree(dev_veyFE); cudaFree(dev_veyFW); cudaFree(dev_w_wertFE); cudaFree(dev_w_wertFW); cudaFree(dev_vexN); cudaFree(dev_vexS); cudaFree(dev_veyN); cudaFree(dev_veyS); cudaFree(dev_w_wertFN); cudaFree(dev_w_wertFS); cudaFree(dev_q_xiE); cudaFree(dev_q_etE); cudaFree(dev_q_xiW); 
cudaFree(dev_q_etW); cudaFree(dev_q_xiFE); cudaFree(dev_q_etFE); cudaFree(dev_q_xiFW); cudaFree(dev_q_etFW); cudaFree(dev_NpressFE); cudaFree(dev_NpressFW); cudaFree(dev_M11EW); cudaFree(dev_q_xiN); cudaFree(dev_q_etN); cudaFree(dev_q_xiS); cudaFree(dev_q_etS); cudaFree(dev_q_xiFN); cudaFree(dev_q_etFN); cudaFree(dev_q_xiFS); cudaFree(dev_q_etFS); cudaFree(dev_NpressFN); cudaFree(dev_NpressFS); cudaFree(dev_M22SN); cudaFree(dev_apE); cudaFree(dev_apW); cudaFree(dev_apFE); cudaFree(dev_apFW); cudaFree(dev_apN); cudaFree(dev_apS); cudaFree(dev_apFN); cudaFree(dev_apFS); cudaFree(dev_em_x); cudaFree(dev_em_y); cudaFree(dev_em_Fx); cudaFree(dev_em_Fy); cudaFree(dev_FpE); cudaFree(dev_FpW); cudaFree(dev_GpN); cudaFree(dev_GpS); cudaFree(dev_czw1x); cudaFree(dev_czw2x); cudaFree(dev_czwF1x); cudaFree(dev_czwF2x); cudaFree(dev_czw1y); cudaFree(dev_czw2y); cudaFree(dev_czwF1y); cudaFree(dev_czwF2y); cudaFree(dev_em_valS); cudaFree(dev_em_valF); cudaFree(dev_Val); cudaFree(dev_dudxE); cudaFree(dev_dvdxE); cudaFree(dev_dudyE); cudaFree(dev_dvdyE); cudaFree(dev_dudxN); cudaFree(dev_dvdxN); cudaFree(dev_dudyN); cudaFree(dev_dvdyN); cudaFree(dev_duxidxix); cudaFree(dev_dvetdxix); cudaFree(dev_duxidetx); cudaFree(dev_dvetdetx); cudaFree(dev_duxidxiy); cudaFree(dev_dvetdxiy); cudaFree(dev_duxidety); cudaFree(dev_dvetdety); cudaFree(dev_vex); cudaFree(dev_vey); cudaFree(dev_vexF); cudaFree(dev_veyF); cudaFree(dev_w_wert); cudaFree(dev_w_wertF); cudaFree(dev_usw); cudaFree(dev_vel); cudaFree(dev_vexw); cudaFree(dev_veyw); cudaFree(dev_q_xi); cudaFree(dev_q_et); cudaFree(dev_q_xiF); cudaFree(dev_q_etF); cudaFree(dev_Ac); cudaFree(dev_AcF); cudaFree(dev_Npress1); cudaFree(dev_Npress2); cudaFree(dev_NpressF); cudaFree(dev_s); cudaFree(dev_v); cudaFree(dev_uone); cudaFree(dev_utwo); cudaFree(dev_usxnew); cudaFree(dev_ufxnew); cudaFree(dev_usxold); cudaFree(dev_ufxold); cudaFree(dev_utmp); cudaFree(dev_inflow); cudaFree(dev_loc); cudaFree(dev_dire); cudaFree(dev_waveSpeed); cudaFree(dev_max); cudaFree(dev_maxW); cudaFree(dev_TotalTime); cudaFree(dev_dt); }
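Throughout run(), kernelStep(), and memoryMalloc() above, each cudaMallocHost/cudaMemset/cudaMemcpy/cudaMalloc return code is written into a single status variable (errhost, errMem, errCpy, errdevice) and only the last assigned value is tested, so an earlier failure is overwritten before the check. A minimal per-call check is sketched below; CHECK_CUDA is a hypothetical helper and not part of the original file.

// Hypothetical helper (sketch, not in the original source): report each failing
// CUDA runtime call instead of only the last one.
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error at %s:%d: %s (%s)\n",               \
                    __FILE__, __LINE__,                                     \
                    cudaGetErrorName(err_), cudaGetErrorString(err_));      \
        }                                                                   \
    } while (0)

// Example of wrapping the calls used above:
//   CHECK_CUDA(cudaMallocHost((void **)&TotalStep_h, sizeof(double)));
//   CHECK_CUDA(cudaMemcpy(dev_topo, topo_h, sizeof(double) * arraySize,
//                         cudaMemcpyHostToDevice));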
15dc4bba303b086fe70bcbad1d946c54f6dea01b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>

#define N 8

__global__ void reduceVector(float *a, int thread){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    for(int s=N/2; s>=1; s/=2){
        if(id<s)
            *(a+id) += *(a+id+s);
        __syncthreads();
    }
}

int main()
{
    int memsize = sizeof(float)*N;
    float *a =(float *) malloc(memsize);
    float *d_a;

    for(int i=0;i<N;++i){
        *(a+i)=(float)(rand()%10);
        printf("%f ,", *(a+i));
    }
    printf("\n");

    hipMalloc(&d_a, memsize);
    hipMemcpy(d_a, a, memsize, hipMemcpyHostToDevice);

    int thread = (int) ceilf((double)N/2);
    hipLaunchKernelGGL(( reduceVector) , dim3(1), dim3(thread), 0, 0, d_a, thread);

    hipMemcpy(a, d_a, memsize, hipMemcpyDeviceToHost);

    printf("%f ,", *a);
    printf("\n\n");

    free(a);hipFree(d_a);
}
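The tree reduction above leaves the sum of all N elements in a[0]; it relies on N being a power of two and on every active thread living in the single launched block, so that __syncthreads() covers them. A host-side reference sum is an easy way to check the result; the sketch below is an assumption for illustration, not code from the file.

// Sketch (not in the original file): CPU reference for the GPU reduction above.
#include <stdio.h>

// Sum the input on the host. Call it on `a` before the copy-back overwrites it,
// then compare the value with *a after the kernel has run.
static float cpuSum(const float *a, int n) {
    float s = 0.0f;
    for (int i = 0; i < n; ++i) s += a[i];
    return s;
}

// Possible usage inside main():
//   float expected = cpuSum(a, N);   // before launching reduceVector
//   ...                              // launch the kernel and copy d_a back into a
//   printf("GPU: %f  CPU: %f\n", *a, expected);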
15dc4bba303b086fe70bcbad1d946c54f6dea01b.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>

#define N 8

__global__ void reduceVector(float *a, int thread){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    for(int s=N/2; s>=1; s/=2){
        if(id<s)
            *(a+id) += *(a+id+s);
        __syncthreads();
    }
}

int main()
{
    int memsize = sizeof(float)*N;
    float *a =(float *) malloc(memsize);
    float *d_a;

    for(int i=0;i<N;++i){
        *(a+i)=(float)(rand()%10);
        printf("%f ,", *(a+i));
    }
    printf("\n");

    cudaMalloc(&d_a, memsize);
    cudaMemcpy(d_a, a, memsize, cudaMemcpyHostToDevice);

    int thread = (int) ceilf((double)N/2);
    reduceVector <<<1, thread>>> (d_a, thread);

    cudaMemcpy(a, d_a, memsize, cudaMemcpyDeviceToHost);

    printf("%f ,", *a);
    printf("\n\n");

    free(a);cudaFree(d_a);
}
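The two files above form a minimal hipify pair: apart from the hipify banner, the differences are the runtime header (hip/hip_runtime.h versus cuda.h), the hip*/cuda* API prefixes, and the kernel-launch syntax. The annotation below spells out the launch rewrite they illustrate; the concrete lines are quoted from the files, nothing new is introduced.

// General hipify rewrite rule for kernel launches:
//   CUDA:  kernel<<<gridDim, blockDim, sharedMemBytes, stream>>>(args...);
//   HIP:   hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...);
//
// For the reduction above, the two forms are equivalent:
//   reduceVector <<<1, thread>>> (d_a, thread);                                     // .cu
//   hipLaunchKernelGGL(( reduceVector) , dim3(1), dim3(thread), 0, 0, d_a, thread); // .hip
// The extra "0, 0" arguments are the shared-memory size and stream that the
// triple-chevron form leaves implicit.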
7b344ece979c237528d12bcd8664ab726e6c3936.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Row-wise matrix addition ("Suma Matrices por Fila") */
#include <iostream>
using namespace std;

// One thread per row: thread i adds row i of A and B element by element.
__global__ void sumaMatrizKernel(float* A, float* B, float* C, int n)
{
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    if(i<n)
    {
        for(int j=0;j<n;j++)
            C[i*n+j] = A[i*n+j] + B[i*n+j];
    }
}

void sumaMatrix(float* A, float* B, float* C, int tam)
{
    int size = (tam*tam) * sizeof(float);
    float *d_A,*d_B,*d_C;

    hipMalloc((void**)&d_A,size);
    hipMalloc((void**)&d_B,size);
    hipMalloc((void**)&d_C,size);

    hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
    hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( sumaMatrizKernel), dim3(ceil(tam/256.0)),dim3(256), 0, 0, d_A,d_B,d_C,tam);

    hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);

    hipFree(d_A);hipFree(d_B);hipFree(d_C);
}

int main()
{
    int n = 10;
    float *h_A,*h_B,*h_C;
    h_A = new float[n*n];
    h_B = new float[n*n];
    h_C = new float[n*n];

    for(int i = 0; i < n; i++)
    {
        for(int j = 0; j < n; j++)
            h_A[i*n+j] = rand() % 100;
    }
    for(int i = 0; i < n; i++)
    {
        for(int j = 0; j < n; j++)
            h_B[i*n+j] = rand() % 100;
    }

    // Note: only the first n elements (row 0) of each n*n matrix are printed.
    cout<<"The generated vectors are: "<<endl;
    for(int i = 0; i < n; i++){
        cout<<h_A[i]<<" ; ";
    }
    cout<<endl;
    for(int i = 0; i < n; i++){
        cout<<h_B[i]<<" ; ";
    }
    cout<<endl;

    sumaMatrix(h_A,h_B,h_C,n);

    for(int i = 0; i < n; i++){
        cout<<h_C[i]<<" ; ";
    }
    cout<<endl;

    return 0;
}
7b344ece979c237528d12bcd8664ab726e6c3936.cu
/* Row-wise matrix addition ("Suma Matrices por Fila") */
#include <iostream>
using namespace std;

// One thread per row: thread i adds row i of A and B element by element.
__global__ void sumaMatrizKernel(float* A, float* B, float* C, int n)
{
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    if(i<n)
    {
        for(int j=0;j<n;j++)
            C[i*n+j] = A[i*n+j] + B[i*n+j];
    }
}

void sumaMatrix(float* A, float* B, float* C, int tam)
{
    int size = (tam*tam) * sizeof(float);
    float *d_A,*d_B,*d_C;

    cudaMalloc((void**)&d_A,size);
    cudaMalloc((void**)&d_B,size);
    cudaMalloc((void**)&d_C,size);

    cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);

    sumaMatrizKernel<<<ceil(tam/256.0),256>>>(d_A,d_B,d_C,tam);

    cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);

    cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}

int main()
{
    int n = 10;
    float *h_A,*h_B,*h_C;
    h_A = new float[n*n];
    h_B = new float[n*n];
    h_C = new float[n*n];

    for(int i = 0; i < n; i++)
    {
        for(int j = 0; j < n; j++)
            h_A[i*n+j] = rand() % 100;
    }
    for(int i = 0; i < n; i++)
    {
        for(int j = 0; j < n; j++)
            h_B[i*n+j] = rand() % 100;
    }

    // Note: only the first n elements (row 0) of each n*n matrix are printed.
    cout<<"The generated vectors are: "<<endl;
    for(int i = 0; i < n; i++){
        cout<<h_A[i]<<" ; ";
    }
    cout<<endl;
    for(int i = 0; i < n; i++){
        cout<<h_B[i]<<" ; ";
    }
    cout<<endl;

    sumaMatrix(h_A,h_B,h_C,n);

    for(int i = 0; i < n; i++){
        cout<<h_C[i]<<" ; ";
    }
    cout<<endl;

    return 0;
}
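sumaMatrizKernel in the pair above assigns one thread per row and loops over the columns, so only tam threads do any work and the launch of ceil(tam/256.0) blocks of 256 threads leaves most threads idle for small matrices. For comparison, an element-per-thread variant with a 2D launch is sketched below; it is an assumption for illustration and not part of either file.

// Sketch (not in the original pair): one thread per matrix element.
__global__ void sumaMatrizElemKernel(const float* A, const float* B, float* C, int n)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < n && col < n)
        C[row * n + col] = A[row * n + col] + B[row * n + col];
}

// Possible launch for an n x n matrix:
//   dim3 block(16, 16);
//   dim3 grid((n + block.x - 1) / block.x, (n + block.y - 1) / block.y);
//   sumaMatrizElemKernel<<<grid, block>>>(d_A, d_B, d_C, n);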
8ac236c7d3a0cbb1da72ce821cdf562fe628f5fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/nvtx.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); hipLaunchKernelGGL(( FillValuesKernel), dim3(grid_size), dim3(block_size), 0, 0, data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). 
First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { NVTX_RANGE(K2_FUNC); if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) K2_EVAL( c, num_elems + 1, lambda_init_minus_one, (int32_t i)->void { row_ids[i] = -1; }); K2_EVAL( c, num_elems + 1, lambda_phase_one, (int32_t i)->void { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the // same value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < // next_row_split such that i is the result of rounding up // this_row_split to (something)*2^n, for n = 1, 2, 3, ... this will // take time logarithmic in (next_row_split - this_row_split). we // can then fill in the gaps with a logarithmic-time loop, by // looking for a value that's not (-1) by rounding the current index // down to successively higher powers of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j // + (1<<power) < next_row_split" in the loop condition. Note, // we don't want a loop-within-a-loop because of how SIMT // works... 
row_ids[j] = i; } } }); // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. K2_EVAL( c, num_elems, lambda_phase_two, (int32_t j)->void { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { NVTX_RANGE(K2_FUNC); // process corner case first if (num_elems == 0) { K2_EVAL( c, num_rows + 1, lambda_set_values, (int32_t i)->void { row_splits[i] = 0; }); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; EvalDevice(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(hipStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { NVTX_RANGE(K2_FUNC); if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (::min(next_row_split / dart_separation, num_tasks) - ::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task>) , dim3(grid_size), dim3(block_size), 0, stream, num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
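// ---------------------------------------------------------------------------
// Editor's note: the sketch below is NOT part of the k2 sources above; it is a
// small host-only illustration of the row_splits <-> row_ids relationship that
// RowSplitsToRowIds() and RowIdsToRowSplits() compute on the device.  All
// names here (main, splits_back, ...) are invented for the example.
#include <cstdio>
#include <vector>

int main() {
  // A ragged matrix with 3 rows of sizes 2, 0 and 3 (note the empty row 1).
  std::vector<int> row_splits = {0, 2, 2, 5};          // length num_rows + 1
  int num_rows = 3, num_elems = row_splits[num_rows];  // == 5

  // row_splits -> row_ids, same logic as the kCpu branch of RowSplitsToRowIds.
  std::vector<int> row_ids(num_elems);
  for (int row = 0; row < num_rows; ++row)
    for (int i = row_splits[row]; i < row_splits[row + 1]; ++i)
      row_ids[i] = row;
  // row_ids is now {0, 0, 2, 2, 2}; the empty row 1 never appears in it.

  // row_ids -> row_splits, same logic as the kCpu branch of RowIdsToRowSplits.
  std::vector<int> splits_back(num_rows + 1);
  int cur_row = -1;
  for (int i = 0; i < num_elems; ++i)
    while (cur_row < row_ids[i]) splits_back[++cur_row] = i;
  while (cur_row < num_rows) splits_back[++cur_row] = num_elems;

  for (int r = 0; r <= num_rows; ++r) printf("%d ", splits_back[r]);  // 0 2 2 5
  printf("\n");
  return 0;
}
// ---------------------------------------------------------------------------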
8ac236c7d3a0cbb1da72ce821cdf562fe628f5fa.cu
/** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/nvtx.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. 
Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { NVTX_RANGE(K2_FUNC); if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) K2_EVAL( c, num_elems + 1, lambda_init_minus_one, (int32_t i)->void { row_ids[i] = -1; }); K2_EVAL( c, num_elems + 1, lambda_phase_one, (int32_t i)->void { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the // same value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < // next_row_split such that i is the result of rounding up // this_row_split to (something)*2^n, for n = 1, 2, 3, ... this will // take time logarithmic in (next_row_split - this_row_split). we // can then fill in the gaps with a logarithmic-time loop, by // looking for a value that's not (-1) by rounding the current index // down to successively higher powers of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j // + (1<<power) < next_row_split" in the loop condition. Note, // we don't want a loop-within-a-loop because of how SIMT // works... row_ids[j] = i; } } }); // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. 
K2_EVAL( c, num_elems, lambda_phase_two, (int32_t j)->void { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { NVTX_RANGE(K2_FUNC); // process corner case first if (num_elems == 0) { K2_EVAL( c, num_rows + 1, lambda_set_values, (int32_t i)->void { row_splits[i] = 0; }); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; EvalDevice(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { NVTX_RANGE(K2_FUNC); if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (std::min(next_row_split / dart_separation, num_tasks) - std::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task> <<<grid_size, block_size, 0, stream>>>( num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
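// ---------------------------------------------------------------------------
// Editor's note: the sketch below is NOT part of the k2 sources above.  It is
// a host-only illustration of the "dart throwing" allocation that
// GetTaskRedirect performs: half of the 2*num_tasks jobs go one-per-task, the
// other half are darts thrown at evenly spaced work items, so a task with a
// lot of work collects proportionally more jobs.  JobSketch is an invented
// stand-in for the real TaskRedirect struct.
#include <algorithm>
#include <cstdio>

struct JobSketch { int task_idx, num_jobs, job_id; };

int main() {
  const int num_tasks = 4;
  // row_splits for 4 very unbalanced tasks with sizes 1, 1, 1 and 13.
  int row_splits[num_tasks + 1] = {0, 1, 2, 3, 16};
  int num_items = row_splits[num_tasks] - row_splits[0];
  int dart_separation = num_items / num_tasks;  // == 4, assumed > 0 here

  JobSketch jobs[2 * num_tasks];
  for (int task = 0; task < num_tasks; ++task) {
    int lo = row_splits[task], hi = row_splits[task + 1];
    int num_jobs = 1 + (std::min(hi / dart_separation, num_tasks) -
                        std::min(lo / dart_separation, num_tasks));
    for (int j = 0; j < num_jobs; ++j) {
      int job_idx = (j == 0 ? task  // first half: one job per task
                            : num_tasks + lo / dart_separation + j - 1);
      jobs[job_idx] = {task, num_jobs, j};
    }
  }
  // Task 3 (13 of the 16 items) ends up owning 5 of the 8 jobs.
  for (int i = 0; i < 2 * num_tasks; ++i)
    printf("job %d -> task %d (job %d of %d)\n", i, jobs[i].task_idx,
           jobs[i].job_id, jobs[i].num_jobs);
  return 0;
}
// ---------------------------------------------------------------------------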
53f01784f22581016ac1dcebc6857c1ab3bd6444.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <cuml/decomposition/params.hpp> #include <raft/random/rng.cuh> #include <tsvd/tsvd.cuh> #include <vector> namespace ML { using namespace MLCommon; template <typename T> struct TsvdInputs { T tolerance; int len; int n_row; int n_col; int len2; int n_row2; int n_col2; unsigned long long int seed; int algo; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const TsvdInputs<T>& dims) { return os; } template <typename T> class TsvdTest : public ::testing::TestWithParam<TsvdInputs<T>> { protected: void basicTest() { params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); raft::random::Rng r(params.seed, raft::random::GenTaps); int len = params.len; raft::allocate(data, len, stream); std::vector<T> data_h = {1.0, 2.0, 4.0, 2.0, 4.0, 5.0, 5.0, 4.0, 2.0, 1.0, 6.0, 4.0}; data_h.resize(len); raft::update_device(data, data_h.data(), len, stream); int len_comp = params.n_col * params.n_col; raft::allocate(components, len_comp, stream); raft::allocate(singular_vals, params.n_col, stream); std::vector<T> components_ref_h = { -0.3951, 0.1532, 0.9058, -0.7111, -0.6752, -0.1959, -0.5816, 0.7215, -0.3757}; components_ref_h.resize(len_comp); raft::allocate(components_ref, len_comp, stream); raft::update_device(components_ref, components_ref_h.data(), len_comp, stream); paramsTSVD prms; prms.n_cols = params.n_col; prms.n_rows = params.n_row; prms.n_components = params.n_col; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else prms.algorithm = solver::COV_EIG_JACOBI; tsvdFit(handle, data, components, singular_vals, prms, stream); } void advancedTest() { params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); raft::random::Rng r(params.seed, raft::random::GenTaps); int len = params.len2; paramsTSVD prms; prms.n_cols = params.n_col2; prms.n_rows = params.n_row2; prms.n_components = params.n_col2; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else if (params.algo == 1) prms.algorithm = solver::COV_EIG_JACOBI; else prms.n_components = params.n_col2 - 15; raft::allocate(data2, len, stream); r.uniform(data2, len, T(-1.0), T(1.0), stream); raft::allocate(data2_trans, prms.n_rows * prms.n_components, stream); int len_comp = params.n_col2 * prms.n_components; raft::allocate(components2, len_comp, stream); raft::allocate(explained_vars2, prms.n_components, stream); raft::allocate(explained_var_ratio2, prms.n_components, stream); raft::allocate(singular_vals2, prms.n_components, stream); tsvdFitTransform(handle, data2, data2_trans, components2, explained_vars2, explained_var_ratio2, singular_vals2, prms, stream); raft::allocate(data2_back, len, stream); tsvdInverseTransform(handle, data2_trans, components2, data2_back, prms, stream); } void SetUp() override { CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); basicTest(); 
advancedTest(); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(components)); CUDA_CHECK(hipFree(singular_vals)); CUDA_CHECK(hipFree(components_ref)); CUDA_CHECK(hipFree(data2)); CUDA_CHECK(hipFree(data2_trans)); CUDA_CHECK(hipFree(data2_back)); CUDA_CHECK(hipFree(components2)); CUDA_CHECK(hipFree(explained_vars2)); CUDA_CHECK(hipFree(explained_var_ratio2)); CUDA_CHECK(hipFree(singular_vals2)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: TsvdInputs<T> params; T *data, *components, *singular_vals, *components_ref, *explained_vars_ref; T *data2, *data2_trans, *data2_back, *components2, *explained_vars2, *explained_var_ratio2, *singular_vals2; raft::handle_t handle; hipStream_t stream = 0; }; const std::vector<TsvdInputs<float>> inputsf2 = { {0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0}, {0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1}, {0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}, {0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}}; const std::vector<TsvdInputs<double>> inputsd2 = { {0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0}, {0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1}, {0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}, {0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}}; typedef TsvdTest<float> TsvdTestLeftVecF; TEST_P(TsvdTestLeftVecF, Result) { ASSERT_TRUE(raft::devArrMatch(components, components_ref, (params.n_col * params.n_col), raft::CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestLeftVecD; TEST_P(TsvdTestLeftVecD, Result) { ASSERT_TRUE(raft::devArrMatch(components, components_ref, (params.n_col * params.n_col), raft::CompareApproxAbs<double>(params.tolerance))); } typedef TsvdTest<float> TsvdTestDataVecF; TEST_P(TsvdTestDataVecF, Result) { ASSERT_TRUE(raft::devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), raft::CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestDataVecD; TEST_P(TsvdTestDataVecD, Result) { ASSERT_TRUE(raft::devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), raft::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecD, ::testing::ValuesIn(inputsd2)); } // end namespace ML
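// ---------------------------------------------------------------------------
// Editor's note: the snippet below is NOT part of the cuML test above; it is a
// minimal HIP sketch of the stream lifecycle that SetUp()/TearDown() wrap
// (create stream, allocate, work, free, destroy).  CHECK_HIP is an invented
// stand-in for the CUDA_CHECK/GPU_ERROR macros used in the real tests.
#include <hip/hip_runtime.h>
#include <cstdio>

#define CHECK_HIP(call)                                     \
  do {                                                      \
    hipError_t err_ = (call);                               \
    if (err_ != hipSuccess)                                 \
      printf("HIP error: %s\n", hipGetErrorString(err_));   \
  } while (0)

int main() {
  hipStream_t stream;
  CHECK_HIP(hipStreamCreate(&stream));  // SetUp()
  float* data = nullptr;
  CHECK_HIP(hipMalloc(reinterpret_cast<void**>(&data), 12 * sizeof(float)));
  // ... enqueue work on `stream` here (tsvdFit etc. in the real test) ...
  CHECK_HIP(hipStreamSynchronize(stream));
  CHECK_HIP(hipFree(data));             // TearDown()
  CHECK_HIP(hipStreamDestroy(stream));
  return 0;
}
// ---------------------------------------------------------------------------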
53f01784f22581016ac1dcebc6857c1ab3bd6444.cu
/* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <test_utils.h> #include <cuml/decomposition/params.hpp> #include <raft/random/rng.cuh> #include <tsvd/tsvd.cuh> #include <vector> namespace ML { using namespace MLCommon; template <typename T> struct TsvdInputs { T tolerance; int len; int n_row; int n_col; int len2; int n_row2; int n_col2; unsigned long long int seed; int algo; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const TsvdInputs<T>& dims) { return os; } template <typename T> class TsvdTest : public ::testing::TestWithParam<TsvdInputs<T>> { protected: void basicTest() { params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); raft::random::Rng r(params.seed, raft::random::GenTaps); int len = params.len; raft::allocate(data, len, stream); std::vector<T> data_h = {1.0, 2.0, 4.0, 2.0, 4.0, 5.0, 5.0, 4.0, 2.0, 1.0, 6.0, 4.0}; data_h.resize(len); raft::update_device(data, data_h.data(), len, stream); int len_comp = params.n_col * params.n_col; raft::allocate(components, len_comp, stream); raft::allocate(singular_vals, params.n_col, stream); std::vector<T> components_ref_h = { -0.3951, 0.1532, 0.9058, -0.7111, -0.6752, -0.1959, -0.5816, 0.7215, -0.3757}; components_ref_h.resize(len_comp); raft::allocate(components_ref, len_comp, stream); raft::update_device(components_ref, components_ref_h.data(), len_comp, stream); paramsTSVD prms; prms.n_cols = params.n_col; prms.n_rows = params.n_row; prms.n_components = params.n_col; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else prms.algorithm = solver::COV_EIG_JACOBI; tsvdFit(handle, data, components, singular_vals, prms, stream); } void advancedTest() { params = ::testing::TestWithParam<TsvdInputs<T>>::GetParam(); raft::random::Rng r(params.seed, raft::random::GenTaps); int len = params.len2; paramsTSVD prms; prms.n_cols = params.n_col2; prms.n_rows = params.n_row2; prms.n_components = params.n_col2; if (params.algo == 0) prms.algorithm = solver::COV_EIG_DQ; else if (params.algo == 1) prms.algorithm = solver::COV_EIG_JACOBI; else prms.n_components = params.n_col2 - 15; raft::allocate(data2, len, stream); r.uniform(data2, len, T(-1.0), T(1.0), stream); raft::allocate(data2_trans, prms.n_rows * prms.n_components, stream); int len_comp = params.n_col2 * prms.n_components; raft::allocate(components2, len_comp, stream); raft::allocate(explained_vars2, prms.n_components, stream); raft::allocate(explained_var_ratio2, prms.n_components, stream); raft::allocate(singular_vals2, prms.n_components, stream); tsvdFitTransform(handle, data2, data2_trans, components2, explained_vars2, explained_var_ratio2, singular_vals2, prms, stream); raft::allocate(data2_back, len, stream); tsvdInverseTransform(handle, data2_trans, components2, data2_back, prms, stream); } void SetUp() override { CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); basicTest(); advancedTest(); } void TearDown() override { 
CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(components)); CUDA_CHECK(cudaFree(singular_vals)); CUDA_CHECK(cudaFree(components_ref)); CUDA_CHECK(cudaFree(data2)); CUDA_CHECK(cudaFree(data2_trans)); CUDA_CHECK(cudaFree(data2_back)); CUDA_CHECK(cudaFree(components2)); CUDA_CHECK(cudaFree(explained_vars2)); CUDA_CHECK(cudaFree(explained_var_ratio2)); CUDA_CHECK(cudaFree(singular_vals2)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: TsvdInputs<T> params; T *data, *components, *singular_vals, *components_ref, *explained_vars_ref; T *data2, *data2_trans, *data2_back, *components2, *explained_vars2, *explained_var_ratio2, *singular_vals2; raft::handle_t handle; cudaStream_t stream = 0; }; const std::vector<TsvdInputs<float>> inputsf2 = { {0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0}, {0.01f, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1}, {0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}, {0.05f, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}}; const std::vector<TsvdInputs<double>> inputsd2 = { {0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 0}, {0.01, 4 * 3, 4, 3, 1024 * 128, 1024, 128, 1234ULL, 1}, {0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}, {0.05, 4 * 3, 4, 3, 512 * 64, 512, 64, 1234ULL, 2}}; typedef TsvdTest<float> TsvdTestLeftVecF; TEST_P(TsvdTestLeftVecF, Result) { ASSERT_TRUE(raft::devArrMatch(components, components_ref, (params.n_col * params.n_col), raft::CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestLeftVecD; TEST_P(TsvdTestLeftVecD, Result) { ASSERT_TRUE(raft::devArrMatch(components, components_ref, (params.n_col * params.n_col), raft::CompareApproxAbs<double>(params.tolerance))); } typedef TsvdTest<float> TsvdTestDataVecF; TEST_P(TsvdTestDataVecF, Result) { ASSERT_TRUE(raft::devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), raft::CompareApproxAbs<float>(params.tolerance))); } typedef TsvdTest<double> TsvdTestDataVecD; TEST_P(TsvdTestDataVecD, Result) { ASSERT_TRUE(raft::devArrMatch(data2, data2_back, (params.n_col2 * params.n_col2), raft::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestLeftVecD, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(TsvdTests, TsvdTestDataVecD, ::testing::ValuesIn(inputsd2)); } // end namespace ML
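// ---------------------------------------------------------------------------
// Editor's note: the snippet below is NOT part of the cuML test above; it is a
// reduced illustration of the value-parameterized gtest pattern the TSVD test
// uses (TestWithParam + TEST_P + INSTANTIATE_TEST_CASE_P).  It assumes linking
// against gtest_main, and ToyInputs/ToyTest are invented names.
#include <gtest/gtest.h>
#include <vector>

struct ToyInputs { double tolerance; int n; };

class ToyTest : public ::testing::TestWithParam<ToyInputs> {};

TEST_P(ToyTest, Result) {
  ToyInputs p = GetParam();
  // Stand-in assertion; the real test compares device arrays with
  // raft::devArrMatch and CompareApproxAbs(tolerance).
  EXPECT_NEAR(static_cast<double>(p.n) / p.n, 1.0, p.tolerance);
}

const std::vector<ToyInputs> toy_inputs = {{0.01, 3}, {0.05, 64}};
INSTANTIATE_TEST_CASE_P(ToyTests, ToyTest, ::testing::ValuesIn(toy_inputs));
// ---------------------------------------------------------------------------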
91fba8bed34a31599001482dabbe01722187d60f.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <algorithm>
#include <complex>
#include <cstdlib>
#include <functional>
#include <iomanip>
#include <iostream>
#include <random>
#include <vector>

#include "cu_complex.h"
#include "gpu_error.cuh"
#include "types.hpp"
#include "versions.hpp"

#if !defined PARM || !defined PARN
#error "PARM or PARN is not specified! Specify M and N to measure"
#endif

using namespace std;

#ifdef TSMM
bool tsmttsm_mode = false;
bool tsmm_mode = true;
#endif
#ifdef TSMTTSM
bool tsmttsm_mode = true;
bool tsmm_mode = false;
#endif

void printMatrix(const vector<htype>& m1, const vector<htype>& m2, size_t N1,
                 size_t N2, size_t stride, size_t position = 0,
                 string matchColor = "\e[32m",
                 string mismatchColor = "\e[31m") {
  const size_t range = 10;
  size_t n1 = position < range ? 0 : position - range;
  cout << " - " << n1 << " - \n";
  for (; n1 < N1 && n1 < position + range; n1++) {
    for (size_t n2 = 0; n2 < N2; n2++) {
      if (m1[n1 * stride + n2] == m2[n1 * stride + n2])
        cout << matchColor;
      else
        cout << mismatchColor;
      cout << m1[n1 * stride + n2] << "\e[0m\t";
    }
    cout << "\n";
  }
  cout << " - " << n1 << " - \n";
}

vector<htype> hA;
vector<htype> hB;
vector<htype> hC;
vector<htype> hC_test;
vector<htype> hC_reference;

size_t totalA, totalB, totalC;

dtype *A_clean, *B_clean, *C_clean;
dtype *A_dirty, *B_dirty, *C_dirty;
dtype* temp_storage;

void initMatmul() {
  hA = vector<htype>(totalA);
  hB = vector<htype>(totalB);
  hC = vector<htype>(totalC);
  hC_test = vector<htype>(totalC);
  hC_reference = vector<htype>(totalC);

  GPU_ERROR(hipMalloc(&A_clean, sizeof(dtype) * totalA));
  GPU_ERROR(hipMalloc(&B_clean, sizeof(dtype) * totalB));
  GPU_ERROR(hipMalloc(&C_clean, sizeof(dtype) * totalC));
  GPU_ERROR(hipMalloc(&A_dirty, sizeof(dtype) * totalA));
  GPU_ERROR(hipMalloc(&B_dirty, sizeof(dtype) * totalB));
  GPU_ERROR(hipMalloc(&C_dirty, sizeof(dtype) * totalC));

#pragma omp parallel
  {
    random_device r;
    default_random_engine gen(r());
    uniform_int_distribution<int> dis(-2, 2);
#pragma omp for
    for (size_t i = 0; i < totalA; i++) {
      hA[i] = RAND_HTYPE(dis(gen));
    }
#pragma omp for
    for (size_t i = 0; i < totalB; i++) {
      hB[i] = RAND_HTYPE(dis(gen));
    }
#pragma omp for
    for (size_t i = 0; i < totalC; i++) {
      hC[i] = RAND_HTYPE(dis(gen));
    }
  }

  GPU_ERROR(hipMemcpy(A_clean, hA.data(), sizeof(htype) * totalA,
                      hipMemcpyDefault));
  GPU_ERROR(hipMemcpy(B_clean, hB.data(), sizeof(htype) * totalB,
                      hipMemcpyDefault));
  GPU_ERROR(hipMemcpy(C_clean, hC.data(), sizeof(htype) * totalC,
                      hipMemcpyDefault));
  GPU_ERROR(hipDeviceSynchronize());
}

void deInitMatmul() {
  GPU_ERROR(hipFree(A_clean));
  GPU_ERROR(hipFree(B_clean));
  GPU_ERROR(hipFree(C_clean));
  GPU_ERROR(hipFree(A_dirty));
  GPU_ERROR(hipFree(B_dirty));
  GPU_ERROR(hipFree(C_dirty));
}

bool cleanMatmul(MatmulFunctionType matmulFunction, size_t M, size_t N,
                 size_t K, int lda, int ldb, int ldc, size_t blockCount,
                 bool self, htype beta, vector<htype>& resultDest) {
  GPU_ERROR(
      hipMemcpy(A_dirty, A_clean, sizeof(htype) * totalA, hipMemcpyDefault));
  GPU_ERROR(
      hipMemcpy(B_dirty, B_clean, sizeof(htype) * totalB, hipMemcpyDefault));
  GPU_ERROR(
      hipMemcpy(C_dirty, C_clean, sizeof(htype) * totalC, hipMemcpyDefault));

  dtype dalpha = makeDtype(1.0);
  dtype dbeta = makeDtype(beta);

  bool result;
  if (!self) {
    result = matmulFunction(blockCount, M, N, K, A_dirty, lda, dalpha, B_dirty,
                            ldb, dbeta, C_dirty, ldc);
  } else if (tsmm_mode) {
    result = matmulFunction(blockCount, M, N, K, C_dirty, ldc, dalpha, B_dirty,
                            ldb, dbeta, C_dirty, ldc);
  } else if (M == N) {
    result = matmulFunction(blockCount, M, N, K, A_dirty, lda, dalpha, A_dirty,
                            lda, dbeta, C_dirty, ldc);
  } else {
    result = false;
  }

  GPU_ERROR(hipMemcpy(resultDest.data(), C_dirty, sizeof(htype) * totalC,
                      hipMemcpyDefault));
  return result;
}

enum class TESTRESULT { PASS, SKIP, FAIL };

TESTRESULT testMatmul(MatmulFunctionType matmulFunction,
                      MatmulFunctionType referenceFunction, size_t M, size_t N,
                      size_t K, int lda, int ldb, int ldc, size_t blockCount,
                      bool self, htype beta) {
  // matmulFunction does not support parameters, this is a pass
  if (!cleanMatmul(matmulFunction, M, N, K, lda, ldb, ldc, blockCount, self,
                   beta, hC_test))
    return TESTRESULT::SKIP;
  GPU_ERROR(hipDeviceSynchronize());
  cleanMatmul(referenceFunction, M, N, K, lda, ldb, ldc, blockCount, self,
              beta, hC_reference);
  GPU_ERROR(hipDeviceSynchronize());

  bool passed = true;
#ifdef TSMM
  size_t C1 = K;
  size_t C2 = N;
#endif
#ifdef TSMTTSM
  size_t C1 = N;
  size_t C2 = M;
#endif
  for (size_t c1 = 0; c1 < C1; c1++) {
    for (size_t c2 = 0; c2 < C2; c2++) {
      if (hC_test[c1 * ldc + c2] != hC_reference[c1 * ldc + c2]) {
        cout << "\n( " << blockCount << " blocks, " << ((self) ? "A*A" : "A*B")
             << ", beta=" << beta << ", lda=" << lda << ", ldb=" << ldb
             << ", ldc=" << ldc << ") ";
        cout << "\e[31mMismatch\e[0m at " << c1 << ", " << c2 << "; "
             << hC_test[c1 * ldc + c2] << " != " << hC_reference[c1 * ldc + c2]
             << " ";
#ifdef VERBOSE_ERRORS
        cout << "\n";
        printMatrix(hC_test, hC_reference, C1, C2, ldc, c1);
        cout << "\n--\n";
        printMatrix(hC_reference, hC_reference, C1, C2, ldc, c1);
        cout << "--\n\n";
        cout << K << " Rows\n";
#endif
        passed = false;
        break;
      }
    }
    if (!passed) break;
  }
  if (passed)
    return TESTRESULT::PASS;
  else
    return TESTRESULT::FAIL;
}

int main(int argc, char** argv) {
  int m1 = 0;
  int m2 = 0;
  int n1 = 0;
  int n2 = 0;

  if (argc == 2) {
    m1 = 1;
    m2 = stoi(argv[1]);
  }
  if (argc >= 3) {
    m1 = stoi(argv[1]);
    m2 = stoi(argv[2]);
  }
  if (argc == 4) {
    cout << "Incomplete set of arguments\n";
    exit(1);
  }
  if (argc == 5) {
    n1 = stoi(argv[3]);
    n2 = stoi(argv[4]);
  }
  if (argc == 1) {
    m1 = m2 = PARM;
    n1 = n2 = PARN;
  }

  size_t maxMatrixSize = 1 * ((size_t)1 << 30) / (2 * sizeof(dtype));
  totalA = maxMatrixSize;
#ifdef TSMM
  auto versions = getEnabledTSMMVersions();
  MatmulFunctionType referenceFunction = tsmm_cublas<dtype>;
  totalB = 104 * 104;
  totalC = maxMatrixSize;
#endif
#ifdef TSMTTSM
  auto versions = getEnabledTSMTTSMVersions();
  MatmulFunctionType referenceFunction = tsmttsm_cublas<dtype>;
  totalB = maxMatrixSize;
  totalC = 104 * 104;
#endif

  initMatmul();

  random_device r;
  default_random_engine gen(r());
  uniform_int_distribution<int> dis(0, 4);

  int sampleSize = 20;

  for (int M = m1; M <= m2; M++) {
    for (int N = n1; N <= n2; N++) {
      if (n1 == 0 && n2 == 0) N = M;
      for (const auto& matmulVersion : versions) {
        cout << M << "xKx" << N << " " << matmulVersion.second << " " << types
             << " ";
        bool passed = true;
        for (int self = 0; self <= 1; self++) {
          for (htype beta = 0.0; beta <= 1.0; beta += 1.0) {
            for (int t = 0; t < sampleSize; t++) {
              int blockCount = uniform_int_distribution<int>(1, 200)(gen);
              size_t lda = M + dis(gen);
#ifdef TSMM
              size_t ldb = M + dis(gen);
              size_t ldc = (self == 1 ? max(N + dis(gen), M) : N + dis(gen));
              size_t K = maxMatrixSize / max(lda, ldc);
#endif
#ifdef TSMTTSM
              size_t ldb = N + dis(gen);
              size_t ldc = M + dis(gen);
              size_t K = maxMatrixSize / max(lda, ldb);
#endif
              K = uniform_int_distribution<int>(1, K)(gen);

              auto result =
                  testMatmul(matmulVersion.first, referenceFunction, M, N, K,
                             lda, ldb, ldc, blockCount, (self == 1), beta);
              if (result == TESTRESULT::PASS) {
                cout << "#";
                passed &= true;
              }
              if (result == TESTRESULT::SKIP) {
                cout << "\e[35m-\e[0m";
                passed &= true;
              }
              if (result == TESTRESULT::FAIL) {
                cout << "\e[31mX\e[0m";
                passed &= false;
              }
              cout.flush();
            }
          }
        }
        if (passed)
          cout << "\e[32m\e[1m Passed \e[0m\n";
        else
          cout << "\e[31m\e[1m Failed \e[0m\n";
      }
      if (versions.size() > 1) cout << "\n";
    }
  }
  deInitMatmul();
}
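The hipified test above and the CUDA original that follows differ only in the runtime API names that hipify rewrites (hipMalloc/cudaMalloc, hipMemcpy/cudaMemcpy, hipDeviceSynchronize/cudaDeviceSynchronize, hipFree/cudaFree). The following is a minimal, self-contained sketch of that allocate/copy/synchronize/free pattern, assuming only the HIP runtime; it does not use the repo's GPU_ERROR macro or the dtype/htype aliases from types.hpp.

// Minimal HIP sketch of the pattern used by initMatmul/deInitMatmul above.
// Assumption: HIP runtime only; error handling is inlined rather than using
// the repository's GPU_ERROR macro.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

int main() {
  const size_t n = 1 << 20;
  std::vector<double> host(n, 1.0);
  double* dev = nullptr;

  // Direct counterparts of cudaMalloc / cudaMemcpy / cudaDeviceSynchronize /
  // cudaFree, as substituted by hipify in the file above.
  if (hipMalloc((void**)&dev, n * sizeof(double)) != hipSuccess) return 1;
  if (hipMemcpy(dev, host.data(), n * sizeof(double), hipMemcpyDefault) !=
      hipSuccess)
    return 1;
  if (hipDeviceSynchronize() != hipSuccess) return 1;
  if (hipMemcpy(host.data(), dev, n * sizeof(double), hipMemcpyDefault) !=
      hipSuccess)
    return 1;
  hipFree(dev);
  std::printf("copied %zu doubles through device memory\n", n);
  return 0;
}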
91fba8bed34a31599001482dabbe01722187d60f.cu
#include <cuda_runtime.h> #include <sys/time.h> #include <algorithm> #include <complex> #include <cstdlib> #include <functional> #include <iomanip> #include <iostream> #include <random> #include <vector> #include "cu_complex.h" #include "gpu_error.cuh" #include "types.hpp" #include "versions.hpp" #if !defined PARM || !defined PARN #error "PARM or PARN is not specified! Specify M and N to measure" #endif using namespace std; #ifdef TSMM bool tsmttsm_mode = false; bool tsmm_mode = true; #endif #ifdef TSMTTSM bool tsmttsm_mode = true; bool tsmm_mode = false; #endif void printMatrix(const vector<htype>& m1, const vector<htype>& m2, size_t N1, size_t N2, size_t stride, size_t position = 0, string matchColor = "\e[32m", string mismatchColor = "\e[31m") { const size_t range = 10; size_t n1 = position < range ? 0 : position - range; cout << " - " << n1 << " - \n"; for (; n1 < N1 && n1 < position + range; n1++) { for (size_t n2 = 0; n2 < N2; n2++) { if (m1[n1 * stride + n2] == m2[n1 * stride + n2]) cout << matchColor; else cout << mismatchColor; cout << m1[n1 * stride + n2] << "\e[0m\t"; } cout << "\n"; } cout << " - " << n1 << " - \n"; } vector<htype> hA; vector<htype> hB; vector<htype> hC; vector<htype> hC_test; vector<htype> hC_reference; size_t totalA, totalB, totalC; dtype *A_clean, *B_clean, *C_clean; dtype *A_dirty, *B_dirty, *C_dirty; dtype* temp_storage; void initMatmul() { hA = vector<htype>(totalA); hB = vector<htype>(totalB); hC = vector<htype>(totalC); hC_test = vector<htype>(totalC); hC_reference = vector<htype>(totalC); GPU_ERROR(cudaMalloc(&A_clean, sizeof(dtype) * totalA)); GPU_ERROR(cudaMalloc(&B_clean, sizeof(dtype) * totalB)); GPU_ERROR(cudaMalloc(&C_clean, sizeof(dtype) * totalC)); GPU_ERROR(cudaMalloc(&A_dirty, sizeof(dtype) * totalA)); GPU_ERROR(cudaMalloc(&B_dirty, sizeof(dtype) * totalB)); GPU_ERROR(cudaMalloc(&C_dirty, sizeof(dtype) * totalC)); #pragma omp parallel { random_device r; default_random_engine gen(r()); uniform_int_distribution<int> dis(-2, 2); #pragma omp for for (size_t i = 0; i < totalA; i++) { hA[i] = RAND_HTYPE(dis(gen)); } #pragma omp for for (size_t i = 0; i < totalB; i++) { hB[i] = RAND_HTYPE(dis(gen)); } #pragma omp for for (size_t i = 0; i < totalC; i++) { hC[i] = RAND_HTYPE(dis(gen)); } } GPU_ERROR(cudaMemcpy(A_clean, hA.data(), sizeof(htype) * totalA, cudaMemcpyDefault)); GPU_ERROR(cudaMemcpy(B_clean, hB.data(), sizeof(htype) * totalB, cudaMemcpyDefault)); GPU_ERROR(cudaMemcpy(C_clean, hC.data(), sizeof(htype) * totalC, cudaMemcpyDefault)); GPU_ERROR(cudaDeviceSynchronize()); } void deInitMatmul() { GPU_ERROR(cudaFree(A_clean)); GPU_ERROR(cudaFree(B_clean)); GPU_ERROR(cudaFree(C_clean)); GPU_ERROR(cudaFree(A_dirty)); GPU_ERROR(cudaFree(B_dirty)); GPU_ERROR(cudaFree(C_dirty)); } bool cleanMatmul(MatmulFunctionType matmulFunction, size_t M, size_t N, size_t K, int lda, int ldb, int ldc, size_t blockCount, bool self, htype beta, vector<htype>& resultDest) { GPU_ERROR( cudaMemcpy(A_dirty, A_clean, sizeof(htype) * totalA, cudaMemcpyDefault)); GPU_ERROR( cudaMemcpy(B_dirty, B_clean, sizeof(htype) * totalB, cudaMemcpyDefault)); GPU_ERROR( cudaMemcpy(C_dirty, C_clean, sizeof(htype) * totalC, cudaMemcpyDefault)); dtype dalpha = makeDtype(1.0); dtype dbeta = makeDtype(beta); bool result; if (!self) { result = matmulFunction(blockCount, M, N, K, A_dirty, lda, dalpha, B_dirty, ldb, dbeta, C_dirty, ldc); } else if (tsmm_mode) { result = matmulFunction(blockCount, M, N, K, C_dirty, ldc, dalpha, B_dirty, ldb, dbeta, C_dirty, ldc); } else if (M == N) { result = 
matmulFunction(blockCount, M, N, K, A_dirty, lda, dalpha, A_dirty, lda, dbeta, C_dirty, ldc); } else { result = false; } GPU_ERROR(cudaMemcpy(resultDest.data(), C_dirty, sizeof(htype) * totalC, cudaMemcpyDefault)); return result; } enum class TESTRESULT { PASS, SKIP, FAIL }; TESTRESULT testMatmul(MatmulFunctionType matmulFunction, MatmulFunctionType referenceFunction, size_t M, size_t N, size_t K, int lda, int ldb, int ldc, size_t blockCount, bool self, htype beta) { // matmulFunction does not support parameters, this is a pass if (!cleanMatmul(matmulFunction, M, N, K, lda, ldb, ldc, blockCount, self, beta, hC_test)) return TESTRESULT::SKIP; GPU_ERROR(cudaDeviceSynchronize()); cleanMatmul(referenceFunction, M, N, K, lda, ldb, ldc, blockCount, self, beta, hC_reference); GPU_ERROR(cudaDeviceSynchronize()); bool passed = true; #ifdef TSMM size_t C1 = K; size_t C2 = N; #endif #ifdef TSMTTSM size_t C1 = N; size_t C2 = M; #endif for (size_t c1 = 0; c1 < C1; c1++) { for (size_t c2 = 0; c2 < C2; c2++) { if (hC_test[c1 * ldc + c2] != hC_reference[c1 * ldc + c2]) { cout << "\n( " << blockCount << " blocks, " << ((self) ? "A*A" : "A*B") << ", beta=" << beta << ", lda=" << lda << ", ldb=" << ldb << ", ldc=" << ldc << ") "; cout << "\e[31mMismatch\e[0m at " << c1 << ", " << c2 << "; " << hC_test[c1 * ldc + c2] << " != " << hC_reference[c1 * ldc + c2] << " "; #ifdef VERBOSE_ERRORS cout << "\n"; printMatrix(hC_test, hC_reference, C1, C2, ldc, c1); cout << "\n--\n"; printMatrix(hC_reference, hC_reference, C1, C2, ldc, c1); cout << "--\n\n"; cout << K << " Rows\n"; #endif passed = false; break; } } if (!passed) break; } if (passed) return TESTRESULT::PASS; else return TESTRESULT::FAIL; } int main(int argc, char** argv) { int m1 = 0; int m2 = 0; int n1 = 0; int n2 = 0; if (argc == 2) { m1 = 1; m2 = stoi(argv[1]); } if (argc >= 3) { m1 = stoi(argv[1]); m2 = stoi(argv[2]); } if (argc == 4) { cout << "Incomplete set of arguments\n"; exit(1); } if (argc == 5) { n1 = stoi(argv[3]); n2 = stoi(argv[4]); } if (argc == 1) { m1 = m2 = PARM; n1 = n2 = PARN; } size_t maxMatrixSize = 1 * ((size_t)1 << 30) / (2 * sizeof(dtype)); totalA = maxMatrixSize; #ifdef TSMM auto versions = getEnabledTSMMVersions(); MatmulFunctionType referenceFunction = tsmm_cublas<dtype>; totalB = 104 * 104; totalC = maxMatrixSize; #endif #ifdef TSMTTSM auto versions = getEnabledTSMTTSMVersions(); MatmulFunctionType referenceFunction = tsmttsm_cublas<dtype>; totalB = maxMatrixSize; totalC = 104 * 104; #endif initMatmul(); random_device r; default_random_engine gen(r()); uniform_int_distribution<int> dis(0, 4); int sampleSize = 20; for (int M = m1; M <= m2; M++) { for (int N = n1; N <= n2; N++) { if (n1 == 0 && n2 == 0) N = M; for (const auto& matmulVersion : versions) { cout << M << "xKx" << N << " " << matmulVersion.second << " " << types << " "; bool passed = true; for (int self = 0; self <= 1; self++) { for (htype beta = 0.0; beta <= 1.0; beta += 1.0) { for (int t = 0; t < sampleSize; t++) { int blockCount = uniform_int_distribution<int>(1, 200)(gen); size_t lda = M + dis(gen); #ifdef TSMM size_t ldb = M + dis(gen); size_t ldc = (self == 1 ? 
max(N + dis(gen), M) : N + dis(gen)); size_t K = maxMatrixSize / max(lda, ldc); #endif #ifdef TSMTTSM size_t ldb = N + dis(gen); size_t ldc = M + dis(gen); size_t K = maxMatrixSize / max(lda, ldb); #endif K = uniform_int_distribution<int>(1, K)(gen); auto result = testMatmul(matmulVersion.first, referenceFunction, M, N, K, lda, ldb, ldc, blockCount, (self == 1), beta); if (result == TESTRESULT::PASS) { cout << "#"; passed &= true; } if (result == TESTRESULT::SKIP) { cout << "\e[35m-\e[0m"; passed &= true; } if (result == TESTRESULT::FAIL) { cout << "\e[31mX\e[0m"; passed &= false; } cout.flush(); } } } if (passed) cout << "\e[32m\e[1m Passed \e[0m\n"; else cout << "\e[31m\e[1m Failed \e[0m\n"; } if (versions.size() > 1) cout << "\n"; } } deInitMatmul(); }
f28de38270d0a6900e560ed9a476edf104e6c7a2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_reader_impl.hpp" #include <io/comp/gpuinflate.h> #include <hip/hip_runtime.h> #include <nvstrings/NVStrings.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <array> namespace cudf { namespace io { namespace parquet { #if 0 #define LOG_PRINTF(...) std::printf(__VA_ARGS__) #else #define LOG_PRINTF(...) (void)0 #endif /** * @brief Function that translates Parquet datatype to GDF dtype **/ constexpr std::pair<gdf_dtype, gdf_dtype_extra_info> to_dtype( parquet::Type physical, parquet::ConvertedType logical, bool strings_to_categorical) { // Logical type used for actual data interpretation; the legacy converted type // is superceded by 'logical' type whenever available. switch (logical) { case parquet::UINT_8: case parquet::INT_8: return std::make_pair(GDF_INT8, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::UINT_16: case parquet::INT_16: return std::make_pair(GDF_INT16, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::DATE: return std::make_pair(GDF_DATE32, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::TIMESTAMP_MICROS: #if !PARQUET_GPU_USEC_TO_MSEC return std::make_pair(GDF_DATE64, gdf_dtype_extra_info{TIME_UNIT_us}); #endif case parquet::TIMESTAMP_MILLIS: return std::make_pair(GDF_DATE64, gdf_dtype_extra_info{TIME_UNIT_ms}); default: break; } // Physical storage type supported by Parquet; controls the on-disk storage // format in combination with the encoding type. switch (physical) { case parquet::BOOLEAN: return std::make_pair(GDF_BOOL8, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::INT32: return std::make_pair(GDF_INT32, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::INT64: return std::make_pair(GDF_INT64, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::FLOAT: return std::make_pair(GDF_FLOAT32, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::DOUBLE: return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::BYTE_ARRAY: case parquet::FIXED_LEN_BYTE_ARRAY: // Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring) return std::make_pair(strings_to_categorical ? GDF_CATEGORY : GDF_STRING, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::INT96: // Convert Spark INT96 timestamp to GDF_DATE64 return std::make_pair(GDF_DATE64, gdf_dtype_extra_info{TIME_UNIT_ms}); default: break; } return std::make_pair(GDF_invalid, gdf_dtype_extra_info{TIME_UNIT_NONE}); } /** * @brief Helper that returns the required the number of bits to store a value **/ template <typename T = uint8_t> T required_bits(uint32_t max_level) { return static_cast<T>(parquet::CompactProtocolReader::NumRequiredBits(max_level)); } /** * @brief A helper wrapper for Parquet file metadata. 
Provides some additional * convenience methods for initializing and accessing the metadata and schema **/ struct ParquetMetadata : public parquet::FileMetaData { explicit ParquetMetadata(DataSource *source) { constexpr auto header_len = sizeof(parquet::file_header_s); constexpr auto ender_len = sizeof(parquet::file_ender_s); const auto len = source->size(); const auto header_buffer = source->get_buffer(0, header_len); const auto header = (const parquet::file_header_s *)header_buffer->data(); const auto ender_buffer = source->get_buffer(len - ender_len, ender_len); const auto ender = (const parquet::file_ender_s *)ender_buffer->data(); CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source"); CUDF_EXPECTS( header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC, "Corrupted header or footer"); CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len), "Incorrect footer length"); const auto buffer = source->get_buffer(len - ender->footer_len - ender_len, ender->footer_len); parquet::CompactProtocolReader cp(buffer->data(), ender->footer_len); CUDF_EXPECTS(cp.read(this), "Cannot parse metadata"); CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema"); print_metadata(); } inline int get_total_rows() const { return num_rows; } inline int get_num_row_groups() const { return row_groups.size(); } inline int get_num_columns() const { return row_groups[0].columns.size(); } std::string get_column_name(const std::vector<std::string> &path_in_schema) { std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : ""; for (size_t i = 1; i < path_in_schema.size(); i++) { s += "." + path_in_schema[i]; } return s; } std::vector<std::string> get_column_names() { std::vector<std::string> all_names; for (const auto &chunk : row_groups[0].columns) { all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema)); } return all_names; } /** * @brief Extracts the column name used for the row indexes in a dataframe * * PANDAS adds its own metadata to the key_value section when writing out the * dataframe to a file to aid in exact reconstruction. The JSON-formatted * metadata contains the index column(s) and PANDA-specific datatypes. 
* * @return std::string Name of the index column **/ std::string get_index_column_name() { auto it = std::find_if(key_value_metadata.begin(), key_value_metadata.end(), [](const auto &item) { return item.key == "pandas"; }); if (it != key_value_metadata.end()) { const auto pos = it->value.find("index_columns"); if (pos != std::string::npos) { const auto begin = it->value.find('[', pos); const auto end = it->value.find(']', begin); if ((end - begin) > 1) { return it->value.substr(begin + 2, end - begin - 3); } } } return ""; } /** * @brief Filters and reduces down to a selection of row groups * * @param[in] row_group Index of the row group to select * @param[in,out] row_start Starting row of the selection * @param[in,out] row_count Total number of rows selected * * @return List of row group indexes and its starting row **/ auto select_row_groups(int row_group, int &row_start, int &row_count) { std::vector<std::pair<int, int>> selection; if (row_group != -1) { CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group"); for (int i = 0; i < row_group; ++i) { row_start += row_groups[i].num_rows; } selection.emplace_back(row_group, row_start); row_count = row_groups[row_group].num_rows; } else { row_start = ::max(row_start, 0); if (row_count == -1) { row_count = get_total_rows(); } CUDF_EXPECTS(row_count >= 0, "Invalid row count"); CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start"); for (int i = 0, count = 0; i < (int)row_groups.size(); ++i) { count += row_groups[i].num_rows; if (count > row_start || count == 0) { selection.emplace_back(i, count - row_groups[i].num_rows); } if (count >= (row_start + row_count)) { break; } } } return selection; } /** * @brief Filters and reduces down to a selection of columns * * @param[in] use_names List of column names to select * @param[in] use_index_col Name of the index column * * @return List of column names & Parquet column indexes **/ auto select_columns(std::vector<std::string> use_names, const char *use_index_col) { std::vector<std::pair<int, std::string>> selection; if (not use_names.empty()) { if (get_total_rows() > 0) { if (std::find(use_names.begin(), use_names.end(), use_index_col) == use_names.end()) { use_names.push_back(use_index_col); } } for (const auto &use_name : use_names) { size_t index = 0; for (const auto &name : get_column_names()) { if (name == use_name) { selection.emplace_back(index, name); break; } index++; } } } else { for (const auto &name : get_column_names()) { if (get_total_rows() > 0 || name != use_index_col) { selection.emplace_back(selection.size(), name); } } } CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns"); return selection; } void print_metadata() const { LOG_PRINTF("\n[+] Metadata:\n"); LOG_PRINTF(" version = %d\n", version); LOG_PRINTF(" created_by = \"%s\"\n", created_by.c_str()); LOG_PRINTF(" schema (%zd entries):\n", schema.size()); for (size_t i = 0; i < schema.size(); i++) { LOG_PRINTF( " [%zd] type=%d, name=\"%s\", num_children=%d, rep_type=%d, " "max_def_lvl=%d, max_rep_lvl=%d\n", i, schema[i].type, schema[i].name.c_str(), schema[i].num_children, schema[i].repetition_type, schema[i].max_definition_level, schema[i].max_repetition_level); } LOG_PRINTF(" num rows = %zd\n", (size_t)num_rows); LOG_PRINTF(" num row groups = %zd\n", row_groups.size()); LOG_PRINTF(" num columns = %zd\n", row_groups[0].columns.size()); } }; size_t reader::Impl::count_page_headers( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks) { size_t total_pages = 0; 
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice)); CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size())); CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), hipMemcpyDeviceToHost)); CUDA_TRY(hipStreamSynchronize(0)); LOG_PRINTF("[+] Chunk Information\n"); for (size_t c = 0; c < chunks.size(); c++) { LOG_PRINTF( " %2zd: comp_data=%ld, comp_size=%zd, num_values=%zd\n " "start_row=%zd num_rows=%d max_def_level=%d max_rep_level=%d\n " "data_type=%d def_level_bits=%d rep_level_bits=%d\n " "num_data_pages=%d num_dict_pages=%d max_num_pages=%d\n", c, (uint64_t)chunks[c].compressed_data, chunks[c].compressed_size, chunks[c].num_values, chunks[c].start_row, chunks[c].num_rows, chunks[c].max_def_level, chunks[c].max_rep_level, chunks[c].data_type, chunks[c].def_level_bits, chunks[c].rep_level_bits, chunks[c].num_data_pages, chunks[c].num_dict_pages, chunks[c].max_num_pages); total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages; } return total_pages; } void reader::Impl::decode_page_headers( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks, const hostdevice_vector<parquet::gpu::PageInfo> &pages) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice)); CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size())); CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), hipMemcpyDeviceToHost)); CUDA_TRY(hipStreamSynchronize(0)); LOG_PRINTF("[+] Page Header Information\n"); for (size_t i = 0; i < pages.size(); i++) { LOG_PRINTF( " %2zd: comp_size=%d, uncomp_size=%d, num_values=%d, chunk_row=%d, " "num_rows=%d\n chunk_idx=%d, flags=%d, encoding=%d, def_level=%d " "rep_level=%d, valid_count=%d\n", i, pages[i].compressed_page_size, pages[i].uncompressed_page_size, pages[i].num_values, pages[i].chunk_row, pages[i].num_rows, pages[i].chunk_idx, pages[i].flags, pages[i].encoding, pages[i].definition_level_encoding, pages[i].repetition_level_encoding, pages[i].valid_count); } } device_buffer<uint8_t> reader::Impl::decompress_page_data( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks, const hostdevice_vector<parquet::gpu::PageInfo> &pages) { auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)> &f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_vector<uint8_t> debrotli_scratch; // Count the exact number of compressed pages size_t num_compressed_pages = 0; size_t total_decompressed_size = 0; std::array<std::pair<parquet::Compression, size_t>, 3> codecs{ std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0), std::make_pair(parquet::BROTLI, 0)}; for (auto &codec : codecs) { for_each_codec_page(codec.first, [&](size_t page) { total_decompressed_size += pages[page].uncompressed_page_size; codec.second++; num_compressed_pages++; }); if (codec.first == parquet::BROTLI && codec.second > 0) { 
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second)); } } LOG_PRINTF( "[+] Compression\n Total compressed size: %zd\n Number of " "compressed pages: %zd\n gzip: %zd \n snappy: %zd\n", total_decompressed_size, num_compressed_pages, codecs[0].second, codecs[1].second); // Dispatch batches of pages to decompress for each codec device_buffer<uint8_t> decomp_pages(total_decompressed_size); hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_compressed_pages); hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_compressed_pages); size_t decomp_offset = 0; int32_t argc = 0; for (const auto &codec : codecs) { if (codec.second > 0) { int32_t start_pos = argc; for_each_codec_page(codec.first, [&](size_t page) { inflate_in[argc].srcDevice = pages[page].page_data; inflate_in[argc].srcSize = pages[page].compressed_page_size; inflate_in[argc].dstDevice = decomp_pages.data() + decomp_offset; inflate_in[argc].dstSize = pages[page].uncompressed_page_size; inflate_out[argc].bytes_written = 0; inflate_out[argc].status = static_cast<uint32_t>(-1000); inflate_out[argc].reserved = 0; pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice; decomp_offset += inflate_in[argc].dstSize; argc++; }); CUDA_TRY(hipMemcpyAsync( inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos), sizeof(decltype(inflate_in)::value_type) * (argc - start_pos), hipMemcpyHostToDevice)); CUDA_TRY(hipMemcpyAsync( inflate_out.device_ptr(start_pos), inflate_out.host_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), hipMemcpyHostToDevice)); switch (codec.first) { case parquet::GZIP: CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos, 1)) break; case parquet::SNAPPY: CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos)); break; case parquet::BROTLI: CUDA_TRY(gpu_debrotli(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), debrotli_scratch.data().get(), debrotli_scratch.size(), argc - start_pos)); break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } CUDA_TRY(hipMemcpyAsync( inflate_out.host_ptr(start_pos), inflate_out.device_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), hipMemcpyDeviceToHost)); } } CUDA_TRY(hipStreamSynchronize(0)); // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer CUDA_TRY(hipMemcpyAsync(pages.device_ptr(), pages.host_ptr(), pages.memory_size(), hipMemcpyHostToDevice)); return decomp_pages; } void reader::Impl::decode_page_data( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks, const hostdevice_vector<parquet::gpu::PageInfo> &pages, const std::vector<gdf_column *> &chunk_map, size_t min_row, size_t total_rows) { auto is_dict_chunk = [](const parquet::gpu::ColumnChunkDesc &chunk) { return (chunk.data_type & 0x7) == parquet::BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements rmm::device_vector<parquet::gpu::nvstrdesc_s> 
str_dict_index; if (total_str_dict_indexes > 0) { str_dict_index.resize(total_str_dict_indexes); } // Update chunks with pointers to column data for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs; str_ofs += pages[page_count].num_values; } chunks[c].valid_map_base = (uint32_t *)chunk_map[c]->valid; chunks[c].column_data_base = chunk_map[c]->data; page_count += chunks[c].max_num_pages; } CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice)); if (total_str_dict_indexes > 0) { CUDA_TRY(BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size())); } CUDA_TRY(DecodePageData(pages.device_ptr(), pages.size(), chunks.device_ptr(), chunks.size(), total_rows, min_row)); CUDA_TRY(hipMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), hipMemcpyDeviceToHost)); CUDA_TRY(hipStreamSynchronize(0)); LOG_PRINTF("[+] Page Data Information\n"); for (size_t i = 0; i < pages.size(); i++) { if (pages[i].num_rows > 0) { LOG_PRINTF(" %2zd: valid_count=%d/%d\n", i, pages[i].valid_count, pages[i].num_rows); const size_t c = pages[i].chunk_idx; if (c < chunks.size()) { chunk_map[c]->null_count += pages[i].num_rows - pages[i].valid_count; } } } } reader::Impl::Impl(std::unique_ptr<DataSource> source, reader_options const &options) : source_(std::move(source)) { // Open and parse the source Parquet dataset metadata md_ = std::make_unique<ParquetMetadata>(source_.get()); // Store the index column (PANDAS-specific) index_col_ = md_->get_index_column_name(); // Select only columns required by the options selected_cols_ = md_->select_columns(options.columns, index_col_.c_str()); // Strings may be returned as either GDF_STRING or GDF_CATEGORY columns strings_to_categorical_ = options.strings_to_categorical; } table reader::Impl::read(int skip_rows, int num_rows, int row_group) { // Select only row groups required const auto selected_row_groups = md_->select_row_groups(row_group, skip_rows, num_rows); const auto num_columns = selected_cols_.size(); // Initialize gdf_columns, but hold off on allocating storage space LOG_PRINTF("[+] Selected row groups: %d\n", (int)selected_row_groups.size()); LOG_PRINTF("[+] Selected columns: %d\n", (int)num_columns); LOG_PRINTF("[+] Selected skip_rows: %d num_rows: %d\n", skip_rows, num_rows); std::vector<gdf_column_wrapper> columns; for (const auto &col : selected_cols_) { auto row_group_0 = md_->row_groups[selected_row_groups[0].first]; auto &col_schema = md_->schema[row_group_0.columns[col.first].schema_idx]; auto dtype_info = to_dtype(col_schema.type, col_schema.converted_type, strings_to_categorical_); columns.emplace_back(static_cast<gdf_size_type>(num_rows), dtype_info.first, dtype_info.second, col.second); LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n", columns.size() - 1, columns.back()->col_name, (size_t)columns.back()->size, columns.back()->dtype, (uint64_t)columns.back()->data, (uint64_t)columns.back()->valid); } // Descriptors for all the chunks that make up the selected columns const auto num_column_chunks = selected_row_groups.size() * num_columns; hostdevice_vector<parquet::gpu::ColumnChunkDesc> chunks(0, num_column_chunks); // Association between each column chunk and its gdf_column std::vector<gdf_column *> chunk_map(num_column_chunks); // Tracker for eventually deallocating compressed and uncompressed data std::vector<device_buffer<uint8_t>> page_data; // 
Initialize column chunk info LOG_PRINTF("[+] Column Chunk Description\n"); size_t total_decompressed_size = 0; auto remaining_rows = num_rows; for (const auto &rg : selected_row_groups) { const auto row_group = md_->row_groups[rg.first]; const auto row_group_start = rg.second; const auto row_group_rows = ::min(remaining_rows, (int)row_group.num_rows); for (size_t i = 0; i < num_columns; ++i) { auto col = selected_cols_[i]; auto &col_meta = row_group.columns[col.first].meta_data; auto &col_schema = md_->schema[row_group.columns[col.first].schema_idx]; auto &gdf_column = columns[i]; // Spec requires each row group to contain exactly one chunk for every // column. If there are too many or too few, continue with best effort if (col.second != md_->get_column_name(col_meta.path_in_schema)) { std::cerr << "Detected mismatched column chunk" << std::endl; continue; } if (chunks.size() >= chunks.max_size()) { std::cerr << "Detected too many column chunks" << std::endl; continue; } int32_t type_width = (col_schema.type == parquet::FIXED_LEN_BYTE_ARRAY) ? (col_schema.type_length << 3) : 0; if (gdf_column->dtype == GDF_INT8) type_width = 1; // I32 -> I8 else if (gdf_column->dtype == GDF_INT16) type_width = 2; // I32 -> I16 else if (gdf_column->dtype == GDF_CATEGORY) type_width = 4; // str -> hash32 uint8_t *d_compdata = nullptr; if (col_meta.total_compressed_size != 0) { const auto offset = (col_meta.dictionary_page_offset != 0) ? ::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; page_data.emplace_back(col_meta.total_compressed_size); d_compdata = page_data.back().data(); const auto buffer = source_->get_buffer(offset, col_meta.total_compressed_size); CUDA_TRY(hipMemcpyAsync(d_compdata, buffer->data(), col_meta.total_compressed_size, hipMemcpyHostToDevice)); CUDA_TRY(hipStreamSynchronize(0)); } chunks.insert(parquet::gpu::ColumnChunkDesc( col_meta.total_compressed_size, d_compdata, col_meta.num_values, col_schema.type, type_width, row_group_start, row_group_rows, col_schema.max_definition_level, col_schema.max_repetition_level, required_bits(col_schema.max_definition_level), required_bits(col_schema.max_repetition_level), col_meta.codec, col_schema.converted_type)); LOG_PRINTF( " %2d: %s start_row=%d, num_rows=%d, codec=%d, " "num_values=%ld\n total_compressed_size=%ld " "total_uncompressed_size=%ld\n schema_idx=%d, type=%d, " "type_width=%d, max_def_level=%d, " "max_rep_level=%d\n data_page_offset=%zd, index_page_offset=%zd, " "dict_page_offset=%zd\n", col.first, col.second.c_str(), row_group_start, row_group_rows, col_meta.codec, col_meta.num_values, col_meta.total_compressed_size, col_meta.total_uncompressed_size, row_group.columns[col.first].schema_idx, chunks[chunks.size() - 1].data_type, type_width, col_schema.max_definition_level, col_schema.max_repetition_level, (size_t)col_meta.data_page_offset, (size_t)col_meta.index_page_offset, (size_t)col_meta.dictionary_page_offset); // Map each column chunk to its output gdf_column chunk_map[chunks.size() - 1] = gdf_column.get(); if (col_meta.codec != parquet::Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } } remaining_rows -= row_group.num_rows; } assert(remaining_rows <= 0); // Allocate output memory and convert Parquet format into cuDF format const auto total_pages = count_page_headers(chunks); if (total_pages > 0) { hostdevice_vector<parquet::gpu::PageInfo> pages(total_pages, total_pages); decode_page_headers(chunks, pages); if (total_decompressed_size > 0) { 
auto decomp_page_data = decompress_page_data(chunks, pages); page_data.clear(); page_data.push_back(std::move(decomp_page_data)); } for (auto &column : columns) { CUDF_EXPECTS(column.allocate() == GDF_SUCCESS, "Cannot allocate columns"); } decode_page_data(chunks, pages, chunk_map, skip_rows, num_rows); } else { // Columns' data's memory is still expected for an empty dataframe for (auto &column : columns) { CUDF_EXPECTS(column.allocate() == GDF_SUCCESS, "Cannot allocate columns"); } } // For string dtype, allocate an NvStrings container instance, deallocating // the original string list memory in the process. // This container takes a list of string pointers and lengths, and copies // into its own memory so the source memory must not be released yet. for (auto &column : columns) { if (column->dtype == GDF_STRING) { using str_pair = std::pair<const char *, size_t>; using str_ptr = std::unique_ptr<NVStrings, decltype(&NVStrings::destroy)>; auto str_list = static_cast<str_pair *>(column->data); str_ptr str_data(NVStrings::create_from_index(str_list, num_rows), &NVStrings::destroy); RMM_FREE(std::exchange(column->data, str_data.release()), 0); } } // Transfer ownership to raw pointer output arguments std::vector<gdf_column *> out_cols(columns.size()); for (size_t i = 0; i < columns.size(); ++i) { out_cols[i] = columns[i].release(); } return table(out_cols.data(), out_cols.size()); } reader::reader(std::string filepath, reader_options const &options) : impl_(std::make_unique<Impl>( std::make_unique<DataSource>(filepath.c_str()), options)) {} reader::reader(const char *buffer, size_t length, reader_options const &options) : impl_(std::make_unique<Impl>( std::make_unique<DataSource>(buffer, length), options)) {} reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options) : impl_(std::make_unique<Impl>( std::make_unique<DataSource>(file), options)) {} std::string reader::get_index_column() { return impl_->get_index_column(); } table reader::read_all() { return impl_->read(0, -1, -1); } table reader::read_rows(size_t skip_rows, size_t num_rows) { return impl_->read(skip_rows, (num_rows != 0) ? (int)num_rows : -1, -1); } table reader::read_row_group(size_t row_group) { return impl_->read(0, -1, row_group); } reader::~reader() = default; } // namespace parquet } // namespace io } // namespace cudf
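The reader implementation above repeatedly stages hostdevice_vector contents with hipMemcpyAsync on the default stream and then blocks with hipStreamSynchronize(0), for example in count_page_headers and decode_page_headers. Below is a minimal sketch of that staging pattern in isolation, assuming only the HIP runtime; the Desc struct is a hypothetical stand-in for the real parquet::gpu descriptors and is not part of the library.

// Minimal HIP sketch of the async-copy + default-stream synchronize pattern.
// Assumption: 'Desc' is an illustrative placeholder, not a cudf type.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

struct Desc {
  int num_data_pages;
  int num_dict_pages;
};

int main() {
  std::vector<Desc> host(8, Desc{2, 1});
  Desc* dev = nullptr;
  const size_t bytes = host.size() * sizeof(Desc);

  if (hipMalloc((void**)&dev, bytes) != hipSuccess) return 1;

  // Host -> device on the default (NULL) stream, as the reader does before
  // its header-decoding step.
  hipMemcpyAsync(dev, host.data(), bytes, hipMemcpyHostToDevice, 0);
  // ... device-side work that updates the descriptors would run here ...
  // Device -> host, then block until the default stream has drained.
  hipMemcpyAsync(host.data(), dev, bytes, hipMemcpyDeviceToHost, 0);
  hipStreamSynchronize(0);

  size_t total_pages = 0;
  for (const auto& d : host) total_pages += d.num_data_pages + d.num_dict_pages;
  std::printf("total pages: %zu\n", total_pages);

  hipFree(dev);
  return 0;
}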
f28de38270d0a6900e560ed9a476edf104e6c7a2.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_reader_impl.hpp" #include <io/comp/gpuinflate.h> #include <cuda_runtime.h> #include <nvstrings/NVStrings.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <array> namespace cudf { namespace io { namespace parquet { #if 0 #define LOG_PRINTF(...) std::printf(__VA_ARGS__) #else #define LOG_PRINTF(...) (void)0 #endif /** * @brief Function that translates Parquet datatype to GDF dtype **/ constexpr std::pair<gdf_dtype, gdf_dtype_extra_info> to_dtype( parquet::Type physical, parquet::ConvertedType logical, bool strings_to_categorical) { // Logical type used for actual data interpretation; the legacy converted type // is superceded by 'logical' type whenever available. switch (logical) { case parquet::UINT_8: case parquet::INT_8: return std::make_pair(GDF_INT8, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::UINT_16: case parquet::INT_16: return std::make_pair(GDF_INT16, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::DATE: return std::make_pair(GDF_DATE32, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::TIMESTAMP_MICROS: #if !PARQUET_GPU_USEC_TO_MSEC return std::make_pair(GDF_DATE64, gdf_dtype_extra_info{TIME_UNIT_us}); #endif case parquet::TIMESTAMP_MILLIS: return std::make_pair(GDF_DATE64, gdf_dtype_extra_info{TIME_UNIT_ms}); default: break; } // Physical storage type supported by Parquet; controls the on-disk storage // format in combination with the encoding type. switch (physical) { case parquet::BOOLEAN: return std::make_pair(GDF_BOOL8, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::INT32: return std::make_pair(GDF_INT32, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::INT64: return std::make_pair(GDF_INT64, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::FLOAT: return std::make_pair(GDF_FLOAT32, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::DOUBLE: return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::BYTE_ARRAY: case parquet::FIXED_LEN_BYTE_ARRAY: // Can be mapped to GDF_CATEGORY (32-bit hash) or GDF_STRING (nvstring) return std::make_pair(strings_to_categorical ? GDF_CATEGORY : GDF_STRING, gdf_dtype_extra_info{TIME_UNIT_NONE}); case parquet::INT96: // Convert Spark INT96 timestamp to GDF_DATE64 return std::make_pair(GDF_DATE64, gdf_dtype_extra_info{TIME_UNIT_ms}); default: break; } return std::make_pair(GDF_invalid, gdf_dtype_extra_info{TIME_UNIT_NONE}); } /** * @brief Helper that returns the required the number of bits to store a value **/ template <typename T = uint8_t> T required_bits(uint32_t max_level) { return static_cast<T>(parquet::CompactProtocolReader::NumRequiredBits(max_level)); } /** * @brief A helper wrapper for Parquet file metadata. 
Provides some additional * convenience methods for initializing and accessing the metadata and schema **/ struct ParquetMetadata : public parquet::FileMetaData { explicit ParquetMetadata(DataSource *source) { constexpr auto header_len = sizeof(parquet::file_header_s); constexpr auto ender_len = sizeof(parquet::file_ender_s); const auto len = source->size(); const auto header_buffer = source->get_buffer(0, header_len); const auto header = (const parquet::file_header_s *)header_buffer->data(); const auto ender_buffer = source->get_buffer(len - ender_len, ender_len); const auto ender = (const parquet::file_ender_s *)ender_buffer->data(); CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source"); CUDF_EXPECTS( header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC, "Corrupted header or footer"); CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len), "Incorrect footer length"); const auto buffer = source->get_buffer(len - ender->footer_len - ender_len, ender->footer_len); parquet::CompactProtocolReader cp(buffer->data(), ender->footer_len); CUDF_EXPECTS(cp.read(this), "Cannot parse metadata"); CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema"); print_metadata(); } inline int get_total_rows() const { return num_rows; } inline int get_num_row_groups() const { return row_groups.size(); } inline int get_num_columns() const { return row_groups[0].columns.size(); } std::string get_column_name(const std::vector<std::string> &path_in_schema) { std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : ""; for (size_t i = 1; i < path_in_schema.size(); i++) { s += "." + path_in_schema[i]; } return s; } std::vector<std::string> get_column_names() { std::vector<std::string> all_names; for (const auto &chunk : row_groups[0].columns) { all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema)); } return all_names; } /** * @brief Extracts the column name used for the row indexes in a dataframe * * PANDAS adds its own metadata to the key_value section when writing out the * dataframe to a file to aid in exact reconstruction. The JSON-formatted * metadata contains the index column(s) and PANDA-specific datatypes. 
* * @return std::string Name of the index column **/ std::string get_index_column_name() { auto it = std::find_if(key_value_metadata.begin(), key_value_metadata.end(), [](const auto &item) { return item.key == "pandas"; }); if (it != key_value_metadata.end()) { const auto pos = it->value.find("index_columns"); if (pos != std::string::npos) { const auto begin = it->value.find('[', pos); const auto end = it->value.find(']', begin); if ((end - begin) > 1) { return it->value.substr(begin + 2, end - begin - 3); } } } return ""; } /** * @brief Filters and reduces down to a selection of row groups * * @param[in] row_group Index of the row group to select * @param[in,out] row_start Starting row of the selection * @param[in,out] row_count Total number of rows selected * * @return List of row group indexes and its starting row **/ auto select_row_groups(int row_group, int &row_start, int &row_count) { std::vector<std::pair<int, int>> selection; if (row_group != -1) { CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group"); for (int i = 0; i < row_group; ++i) { row_start += row_groups[i].num_rows; } selection.emplace_back(row_group, row_start); row_count = row_groups[row_group].num_rows; } else { row_start = std::max(row_start, 0); if (row_count == -1) { row_count = get_total_rows(); } CUDF_EXPECTS(row_count >= 0, "Invalid row count"); CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start"); for (int i = 0, count = 0; i < (int)row_groups.size(); ++i) { count += row_groups[i].num_rows; if (count > row_start || count == 0) { selection.emplace_back(i, count - row_groups[i].num_rows); } if (count >= (row_start + row_count)) { break; } } } return selection; } /** * @brief Filters and reduces down to a selection of columns * * @param[in] use_names List of column names to select * @param[in] use_index_col Name of the index column * * @return List of column names & Parquet column indexes **/ auto select_columns(std::vector<std::string> use_names, const char *use_index_col) { std::vector<std::pair<int, std::string>> selection; if (not use_names.empty()) { if (get_total_rows() > 0) { if (std::find(use_names.begin(), use_names.end(), use_index_col) == use_names.end()) { use_names.push_back(use_index_col); } } for (const auto &use_name : use_names) { size_t index = 0; for (const auto &name : get_column_names()) { if (name == use_name) { selection.emplace_back(index, name); break; } index++; } } } else { for (const auto &name : get_column_names()) { if (get_total_rows() > 0 || name != use_index_col) { selection.emplace_back(selection.size(), name); } } } CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns"); return selection; } void print_metadata() const { LOG_PRINTF("\n[+] Metadata:\n"); LOG_PRINTF(" version = %d\n", version); LOG_PRINTF(" created_by = \"%s\"\n", created_by.c_str()); LOG_PRINTF(" schema (%zd entries):\n", schema.size()); for (size_t i = 0; i < schema.size(); i++) { LOG_PRINTF( " [%zd] type=%d, name=\"%s\", num_children=%d, rep_type=%d, " "max_def_lvl=%d, max_rep_lvl=%d\n", i, schema[i].type, schema[i].name.c_str(), schema[i].num_children, schema[i].repetition_type, schema[i].max_definition_level, schema[i].max_repetition_level); } LOG_PRINTF(" num rows = %zd\n", (size_t)num_rows); LOG_PRINTF(" num row groups = %zd\n", row_groups.size()); LOG_PRINTF(" num columns = %zd\n", row_groups[0].columns.size()); } }; size_t reader::Impl::count_page_headers( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks) { size_t total_pages = 0; 
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice)); CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size())); CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), cudaMemcpyDeviceToHost)); CUDA_TRY(cudaStreamSynchronize(0)); LOG_PRINTF("[+] Chunk Information\n"); for (size_t c = 0; c < chunks.size(); c++) { LOG_PRINTF( " %2zd: comp_data=%ld, comp_size=%zd, num_values=%zd\n " "start_row=%zd num_rows=%d max_def_level=%d max_rep_level=%d\n " "data_type=%d def_level_bits=%d rep_level_bits=%d\n " "num_data_pages=%d num_dict_pages=%d max_num_pages=%d\n", c, (uint64_t)chunks[c].compressed_data, chunks[c].compressed_size, chunks[c].num_values, chunks[c].start_row, chunks[c].num_rows, chunks[c].max_def_level, chunks[c].max_rep_level, chunks[c].data_type, chunks[c].def_level_bits, chunks[c].rep_level_bits, chunks[c].num_data_pages, chunks[c].num_dict_pages, chunks[c].max_num_pages); total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages; } return total_pages; } void reader::Impl::decode_page_headers( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks, const hostdevice_vector<parquet::gpu::PageInfo> &pages) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages; chunks[c].page_info = pages.device_ptr(page_count); page_count += chunks[c].max_num_pages; } CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice)); CUDA_TRY(parquet::gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size())); CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), cudaMemcpyDeviceToHost)); CUDA_TRY(cudaStreamSynchronize(0)); LOG_PRINTF("[+] Page Header Information\n"); for (size_t i = 0; i < pages.size(); i++) { LOG_PRINTF( " %2zd: comp_size=%d, uncomp_size=%d, num_values=%d, chunk_row=%d, " "num_rows=%d\n chunk_idx=%d, flags=%d, encoding=%d, def_level=%d " "rep_level=%d, valid_count=%d\n", i, pages[i].compressed_page_size, pages[i].uncompressed_page_size, pages[i].num_values, pages[i].chunk_row, pages[i].num_rows, pages[i].chunk_idx, pages[i].flags, pages[i].encoding, pages[i].definition_level_encoding, pages[i].repetition_level_encoding, pages[i].valid_count); } } device_buffer<uint8_t> reader::Impl::decompress_page_data( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks, const hostdevice_vector<parquet::gpu::PageInfo> &pages) { auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)> &f) { for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { const auto page_stride = chunks[c].max_num_pages; if (chunks[c].codec == codec) { for (int k = 0; k < page_stride; k++) { f(page_count + k); } } page_count += page_stride; } }; // Brotli scratch memory for decompressing rmm::device_vector<uint8_t> debrotli_scratch; // Count the exact number of compressed pages size_t num_compressed_pages = 0; size_t total_decompressed_size = 0; std::array<std::pair<parquet::Compression, size_t>, 3> codecs{ std::make_pair(parquet::GZIP, 0), std::make_pair(parquet::SNAPPY, 0), std::make_pair(parquet::BROTLI, 0)}; for (auto &codec : codecs) { for_each_codec_page(codec.first, [&](size_t page) { total_decompressed_size += pages[page].uncompressed_page_size; codec.second++; num_compressed_pages++; }); if (codec.first == parquet::BROTLI && codec.second > 0) { 
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second)); } } LOG_PRINTF( "[+] Compression\n Total compressed size: %zd\n Number of " "compressed pages: %zd\n gzip: %zd \n snappy: %zd\n", total_decompressed_size, num_compressed_pages, codecs[0].second, codecs[1].second); // Dispatch batches of pages to decompress for each codec device_buffer<uint8_t> decomp_pages(total_decompressed_size); hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_compressed_pages); hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_compressed_pages); size_t decomp_offset = 0; int32_t argc = 0; for (const auto &codec : codecs) { if (codec.second > 0) { int32_t start_pos = argc; for_each_codec_page(codec.first, [&](size_t page) { inflate_in[argc].srcDevice = pages[page].page_data; inflate_in[argc].srcSize = pages[page].compressed_page_size; inflate_in[argc].dstDevice = decomp_pages.data() + decomp_offset; inflate_in[argc].dstSize = pages[page].uncompressed_page_size; inflate_out[argc].bytes_written = 0; inflate_out[argc].status = static_cast<uint32_t>(-1000); inflate_out[argc].reserved = 0; pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice; decomp_offset += inflate_in[argc].dstSize; argc++; }); CUDA_TRY(cudaMemcpyAsync( inflate_in.device_ptr(start_pos), inflate_in.host_ptr(start_pos), sizeof(decltype(inflate_in)::value_type) * (argc - start_pos), cudaMemcpyHostToDevice)); CUDA_TRY(cudaMemcpyAsync( inflate_out.device_ptr(start_pos), inflate_out.host_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), cudaMemcpyHostToDevice)); switch (codec.first) { case parquet::GZIP: CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos, 1)) break; case parquet::SNAPPY: CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), argc - start_pos)); break; case parquet::BROTLI: CUDA_TRY(gpu_debrotli(inflate_in.device_ptr(start_pos), inflate_out.device_ptr(start_pos), debrotli_scratch.data().get(), debrotli_scratch.size(), argc - start_pos)); break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } CUDA_TRY(cudaMemcpyAsync( inflate_out.host_ptr(start_pos), inflate_out.device_ptr(start_pos), sizeof(decltype(inflate_out)::value_type) * (argc - start_pos), cudaMemcpyDeviceToHost)); } } CUDA_TRY(cudaStreamSynchronize(0)); // Update the page information in device memory with the updated value of // page_data; it now points to the uncompressed data buffer CUDA_TRY(cudaMemcpyAsync(pages.device_ptr(), pages.host_ptr(), pages.memory_size(), cudaMemcpyHostToDevice)); return decomp_pages; } void reader::Impl::decode_page_data( const hostdevice_vector<parquet::gpu::ColumnChunkDesc> &chunks, const hostdevice_vector<parquet::gpu::PageInfo> &pages, const std::vector<gdf_column *> &chunk_map, size_t min_row, size_t total_rows) { auto is_dict_chunk = [](const parquet::gpu::ColumnChunkDesc &chunk) { return (chunk.data_type & 0x7) == parquet::BYTE_ARRAY && chunk.num_dict_pages > 0; }; // Count the number of string dictionary entries // NOTE: Assumes first page in the chunk is always the dictionary page size_t total_str_dict_indexes = 0; for (size_t c = 0, page_count = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_values; } page_count += chunks[c].max_num_pages; } // Build index for string dictionaries since they can't be indexed // directly due to variable-sized elements rmm::device_vector<parquet::gpu::nvstrdesc_s> 
str_dict_index; if (total_str_dict_indexes > 0) { str_dict_index.resize(total_str_dict_indexes); } // Update chunks with pointers to column data for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) { if (is_dict_chunk(chunks[c])) { chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs; str_ofs += pages[page_count].num_values; } chunks[c].valid_map_base = (uint32_t *)chunk_map[c]->valid; chunks[c].column_data_base = chunk_map[c]->data; page_count += chunks[c].max_num_pages; } CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice)); if (total_str_dict_indexes > 0) { CUDA_TRY(BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size())); } CUDA_TRY(DecodePageData(pages.device_ptr(), pages.size(), chunks.device_ptr(), chunks.size(), total_rows, min_row)); CUDA_TRY(cudaMemcpyAsync(pages.host_ptr(), pages.device_ptr(), pages.memory_size(), cudaMemcpyDeviceToHost)); CUDA_TRY(cudaStreamSynchronize(0)); LOG_PRINTF("[+] Page Data Information\n"); for (size_t i = 0; i < pages.size(); i++) { if (pages[i].num_rows > 0) { LOG_PRINTF(" %2zd: valid_count=%d/%d\n", i, pages[i].valid_count, pages[i].num_rows); const size_t c = pages[i].chunk_idx; if (c < chunks.size()) { chunk_map[c]->null_count += pages[i].num_rows - pages[i].valid_count; } } } } reader::Impl::Impl(std::unique_ptr<DataSource> source, reader_options const &options) : source_(std::move(source)) { // Open and parse the source Parquet dataset metadata md_ = std::make_unique<ParquetMetadata>(source_.get()); // Store the index column (PANDAS-specific) index_col_ = md_->get_index_column_name(); // Select only columns required by the options selected_cols_ = md_->select_columns(options.columns, index_col_.c_str()); // Strings may be returned as either GDF_STRING or GDF_CATEGORY columns strings_to_categorical_ = options.strings_to_categorical; } table reader::Impl::read(int skip_rows, int num_rows, int row_group) { // Select only row groups required const auto selected_row_groups = md_->select_row_groups(row_group, skip_rows, num_rows); const auto num_columns = selected_cols_.size(); // Initialize gdf_columns, but hold off on allocating storage space LOG_PRINTF("[+] Selected row groups: %d\n", (int)selected_row_groups.size()); LOG_PRINTF("[+] Selected columns: %d\n", (int)num_columns); LOG_PRINTF("[+] Selected skip_rows: %d num_rows: %d\n", skip_rows, num_rows); std::vector<gdf_column_wrapper> columns; for (const auto &col : selected_cols_) { auto row_group_0 = md_->row_groups[selected_row_groups[0].first]; auto &col_schema = md_->schema[row_group_0.columns[col.first].schema_idx]; auto dtype_info = to_dtype(col_schema.type, col_schema.converted_type, strings_to_categorical_); columns.emplace_back(static_cast<gdf_size_type>(num_rows), dtype_info.first, dtype_info.second, col.second); LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n", columns.size() - 1, columns.back()->col_name, (size_t)columns.back()->size, columns.back()->dtype, (uint64_t)columns.back()->data, (uint64_t)columns.back()->valid); } // Descriptors for all the chunks that make up the selected columns const auto num_column_chunks = selected_row_groups.size() * num_columns; hostdevice_vector<parquet::gpu::ColumnChunkDesc> chunks(0, num_column_chunks); // Association between each column chunk and its gdf_column std::vector<gdf_column *> chunk_map(num_column_chunks); // Tracker for eventually deallocating compressed and uncompressed data std::vector<device_buffer<uint8_t>> page_data; // 
Initialize column chunk info LOG_PRINTF("[+] Column Chunk Description\n"); size_t total_decompressed_size = 0; auto remaining_rows = num_rows; for (const auto &rg : selected_row_groups) { const auto row_group = md_->row_groups[rg.first]; const auto row_group_start = rg.second; const auto row_group_rows = std::min(remaining_rows, (int)row_group.num_rows); for (size_t i = 0; i < num_columns; ++i) { auto col = selected_cols_[i]; auto &col_meta = row_group.columns[col.first].meta_data; auto &col_schema = md_->schema[row_group.columns[col.first].schema_idx]; auto &gdf_column = columns[i]; // Spec requires each row group to contain exactly one chunk for every // column. If there are too many or too few, continue with best effort if (col.second != md_->get_column_name(col_meta.path_in_schema)) { std::cerr << "Detected mismatched column chunk" << std::endl; continue; } if (chunks.size() >= chunks.max_size()) { std::cerr << "Detected too many column chunks" << std::endl; continue; } int32_t type_width = (col_schema.type == parquet::FIXED_LEN_BYTE_ARRAY) ? (col_schema.type_length << 3) : 0; if (gdf_column->dtype == GDF_INT8) type_width = 1; // I32 -> I8 else if (gdf_column->dtype == GDF_INT16) type_width = 2; // I32 -> I16 else if (gdf_column->dtype == GDF_CATEGORY) type_width = 4; // str -> hash32 uint8_t *d_compdata = nullptr; if (col_meta.total_compressed_size != 0) { const auto offset = (col_meta.dictionary_page_offset != 0) ? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset) : col_meta.data_page_offset; page_data.emplace_back(col_meta.total_compressed_size); d_compdata = page_data.back().data(); const auto buffer = source_->get_buffer(offset, col_meta.total_compressed_size); CUDA_TRY(cudaMemcpyAsync(d_compdata, buffer->data(), col_meta.total_compressed_size, cudaMemcpyHostToDevice)); CUDA_TRY(cudaStreamSynchronize(0)); } chunks.insert(parquet::gpu::ColumnChunkDesc( col_meta.total_compressed_size, d_compdata, col_meta.num_values, col_schema.type, type_width, row_group_start, row_group_rows, col_schema.max_definition_level, col_schema.max_repetition_level, required_bits(col_schema.max_definition_level), required_bits(col_schema.max_repetition_level), col_meta.codec, col_schema.converted_type)); LOG_PRINTF( " %2d: %s start_row=%d, num_rows=%d, codec=%d, " "num_values=%ld\n total_compressed_size=%ld " "total_uncompressed_size=%ld\n schema_idx=%d, type=%d, " "type_width=%d, max_def_level=%d, " "max_rep_level=%d\n data_page_offset=%zd, index_page_offset=%zd, " "dict_page_offset=%zd\n", col.first, col.second.c_str(), row_group_start, row_group_rows, col_meta.codec, col_meta.num_values, col_meta.total_compressed_size, col_meta.total_uncompressed_size, row_group.columns[col.first].schema_idx, chunks[chunks.size() - 1].data_type, type_width, col_schema.max_definition_level, col_schema.max_repetition_level, (size_t)col_meta.data_page_offset, (size_t)col_meta.index_page_offset, (size_t)col_meta.dictionary_page_offset); // Map each column chunk to its output gdf_column chunk_map[chunks.size() - 1] = gdf_column.get(); if (col_meta.codec != parquet::Compression::UNCOMPRESSED) { total_decompressed_size += col_meta.total_uncompressed_size; } } remaining_rows -= row_group.num_rows; } assert(remaining_rows <= 0); // Allocate output memory and convert Parquet format into cuDF format const auto total_pages = count_page_headers(chunks); if (total_pages > 0) { hostdevice_vector<parquet::gpu::PageInfo> pages(total_pages, total_pages); decode_page_headers(chunks, pages); if (total_decompressed_size > 
0) { auto decomp_page_data = decompress_page_data(chunks, pages); page_data.clear(); page_data.push_back(std::move(decomp_page_data)); } for (auto &column : columns) { CUDF_EXPECTS(column.allocate() == GDF_SUCCESS, "Cannot allocate columns"); } decode_page_data(chunks, pages, chunk_map, skip_rows, num_rows); } else { // Columns' data's memory is still expected for an empty dataframe for (auto &column : columns) { CUDF_EXPECTS(column.allocate() == GDF_SUCCESS, "Cannot allocate columns"); } } // For string dtype, allocate an NvStrings container instance, deallocating // the original string list memory in the process. // This container takes a list of string pointers and lengths, and copies // into its own memory so the source memory must not be released yet. for (auto &column : columns) { if (column->dtype == GDF_STRING) { using str_pair = std::pair<const char *, size_t>; using str_ptr = std::unique_ptr<NVStrings, decltype(&NVStrings::destroy)>; auto str_list = static_cast<str_pair *>(column->data); str_ptr str_data(NVStrings::create_from_index(str_list, num_rows), &NVStrings::destroy); RMM_FREE(std::exchange(column->data, str_data.release()), 0); } } // Transfer ownership to raw pointer output arguments std::vector<gdf_column *> out_cols(columns.size()); for (size_t i = 0; i < columns.size(); ++i) { out_cols[i] = columns[i].release(); } return table(out_cols.data(), out_cols.size()); } reader::reader(std::string filepath, reader_options const &options) : impl_(std::make_unique<Impl>( std::make_unique<DataSource>(filepath.c_str()), options)) {} reader::reader(const char *buffer, size_t length, reader_options const &options) : impl_(std::make_unique<Impl>( std::make_unique<DataSource>(buffer, length), options)) {} reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options) : impl_(std::make_unique<Impl>( std::make_unique<DataSource>(file), options)) {} std::string reader::get_index_column() { return impl_->get_index_column(); } table reader::read_all() { return impl_->read(0, -1, -1); } table reader::read_rows(size_t skip_rows, size_t num_rows) { return impl_->read(skip_rows, (num_rows != 0) ? (int)num_rows : -1, -1); } table reader::read_row_group(size_t row_group) { return impl_->read(0, -1, row_group); } reader::~reader() = default; } // namespace parquet } // namespace io } // namespace cudf
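// required_bits() is called when building each ColumnChunkDesc above, but its
// definition lies outside this excerpt. A plausible sketch, consistent with how
// Parquet encodes definition/repetition levels (the bit width needed to represent
// any value in [0, max_level]), is shown below; treat the name and exact behaviour
// as assumptions rather than the library's actual implementation.
constexpr int required_bits_sketch(int max_level) {
  int bits = 0;
  while ((1 << bits) <= max_level) ++bits;  // smallest width w with 2^w > max_level
  return bits;                              // 0 -> 0 bits, 1 -> 1 bit, 3 -> 2 bits, ...
}
// e.g. a column whose max_definition_level is 1 needs 1 bit per definition level.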
0903439ad6865b55bdd4b41b2704c150da95ac0b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #include "config.h" #define TIMER_CREATE(t) \ hipEvent_t t##_start, t##_end; \ hipEventCreate(&t##_start); \ hipEventCreate(&t##_end); #define TIMER_START(t) \ hipEventRecord(t##_start); \ hipEventSynchronize(t##_start); \ #define TIMER_END(t) \ hipEventRecord(t##_end); \ hipEventSynchronize(t##_end); \ hipEventElapsedTime(&t, t##_start, t##_end); \ hipEventDestroy(t##_start); \ hipEventDestroy(t##_end); #define BLOCK_SIZE_1D 256 /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); exit(-1); } #endif return result; } // Add GPU kernel and functions // turn hist to shared __global__ void kernel(unsigned char *input, int *hist, unsigned int size){ register int location = blockIdx.x * blockDim.x+threadIdx.x; // calculate histogram if (location < size){ atomicAdd(&(hist[input[location]]),1); } } //// turn hist to shared memory //// for loop prefix sum has more consistant result __global__ void cum_calc( unsigned char *input, int *hist, unsigned int size){ register int location = threadIdx.x; register int space = 1; register int temp = 0; register int neighbor = 0; //use shared memory __shared__ int Cache[256]; Cache[location] = hist[location]; __syncthreads(); for (register int i = 0; i < 8; i++) { temp = Cache[location]; neighbor = 0; if ((location - space) >= 0) { neighbor = Cache[location - space]; } if (location >= space) { Cache[location] = temp + neighbor; } space = space * 2; __syncthreads(); } //write to result hist[location] = Cache[location]*255/size; } ///// port lookup to shared memor __global__ void equalize_output( unsigned char *input, int *lookup, unsigned int size){ register int location = blockIdx.x * blockDim.x+threadIdx.x; if (location < size){ input[location] = lookup[input[location]]; } } __global__ void print_hist(int *hist) { register int location =threadIdx.x; printf("pixel intensity: %d, value: %d\n",location,hist[location]); } __global__ void warmup(unsigned char *input, unsigned char *output){ int x = blockIdx.x*TILE_SIZE+threadIdx.x; int y = blockIdx.y*TILE_SIZE+threadIdx.y; int location = y*(gridDim.x*TILE_SIZE)+x; output[location] = 0; } // NOTE: The data passed on is already padded void gpu_function(unsigned char *data, unsigned int height, unsigned int width){ unsigned char *input_gpu; int *hist; unsigned int size_img = width*height; int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; int size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(hipMalloc((void**)&hist , 256*sizeof(int))); checkCuda(hipMemset(hist, 0 , 256*sizeof(int))); // Copy data to GPU checkCuda(hipMemcpy(input_gpu, data, size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); // Execute algorithm int gridSize1D = 1 + (( size_img - 1) / BLOCK_SIZE_1D); dim3 dimGrid1D(gridSize1D); dim3 dimBlock1D(BLOCK_SIZE_1D); // Kernel Call #ifdef CUDA_TIMING float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif printf("here:/n"); // Add more kernels and functions as needed here 
hipLaunchKernelGGL(( kernel), dim3(dimGrid1D), dim3(dimBlock1D), 0, 0, input_gpu, hist, size_img); //print_hist<<<1,256>>>(hist); hipLaunchKernelGGL(( cum_calc), dim3(1),dim3(256) , 0, 0, input_gpu, hist, size_img); hipLaunchKernelGGL(( equalize_output), dim3(dimGrid1D), dim3(dimBlock1D), 0, 0, input_gpu, hist, size_img); // From here on, no need to change anything checkCuda(hipPeekAtLastError()); checkCuda(hipDeviceSynchronize()); #ifdef CUDA_TIMING TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(hipMemcpy(data, input_gpu, size*sizeof(unsigned char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(input_gpu)); } void gpu_warmup(unsigned char *data, unsigned int height, unsigned int width){ unsigned char *input_gpu; unsigned char *output_gpu; int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). int size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(hipMemcpy(input_gpu, data, size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(TILE_SIZE, TILE_SIZE); hipLaunchKernelGGL(( warmup), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu, output_gpu); checkCuda(hipDeviceSynchronize()); // Retrieve results from the GPU checkCuda(hipMemcpy(data, output_gpu, size*sizeof(unsigned char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(output_gpu)); checkCuda(hipFree(input_gpu)); }
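// The "turn hist to shared" notes above describe an optimization that this file
// does not actually implement: privatizing the 256-bin histogram in shared memory
// so most atomics stay on-chip. A minimal sketch of such a kernel follows (the
// kernel name is illustrative and not part of the original code); it assumes a
// 1-D launch like the existing histogram kernel.
__global__ void hist_shared_sketch(unsigned char *input, int *hist, unsigned int size) {
    __shared__ int localHist[256];
    for (int b = threadIdx.x; b < 256; b += blockDim.x)
        localHist[b] = 0;                                  // clear the per-block histogram
    __syncthreads();
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
        atomicAdd(&localHist[input[idx]], 1);              // shared-memory atomics
    __syncthreads();
    for (int b = threadIdx.x; b < 256; b += blockDim.x)
        atomicAdd(&hist[b], localHist[b]);                 // one flush per bin per block
}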
0903439ad6865b55bdd4b41b2704c150da95ac0b.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> #include "config.h" #define TIMER_CREATE(t) \ cudaEvent_t t##_start, t##_end; \ cudaEventCreate(&t##_start); \ cudaEventCreate(&t##_end); #define TIMER_START(t) \ cudaEventRecord(t##_start); \ cudaEventSynchronize(t##_start); \ #define TIMER_END(t) \ cudaEventRecord(t##_end); \ cudaEventSynchronize(t##_end); \ cudaEventElapsedTime(&t, t##_start, t##_end); \ cudaEventDestroy(t##_start); \ cudaEventDestroy(t##_end); #define BLOCK_SIZE_1D 256 /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(-1); } #endif return result; } // Add GPU kernel and functions // turn hist to shared __global__ void kernel(unsigned char *input, int *hist, unsigned int size){ register int location = blockIdx.x * blockDim.x+threadIdx.x; // calculate histogram if (location < size){ atomicAdd(&(hist[input[location]]),1); } } //// turn hist to shared memory //// for loop prefix sum has more consistant result __global__ void cum_calc( unsigned char *input, int *hist, unsigned int size){ register int location = threadIdx.x; register int space = 1; register int temp = 0; register int neighbor = 0; //use shared memory __shared__ int Cache[256]; Cache[location] = hist[location]; __syncthreads(); for (register int i = 0; i < 8; i++) { temp = Cache[location]; neighbor = 0; if ((location - space) >= 0) { neighbor = Cache[location - space]; } if (location >= space) { Cache[location] = temp + neighbor; } space = space * 2; __syncthreads(); } //write to result hist[location] = Cache[location]*255/size; } ///// port lookup to shared memor __global__ void equalize_output( unsigned char *input, int *lookup, unsigned int size){ register int location = blockIdx.x * blockDim.x+threadIdx.x; if (location < size){ input[location] = lookup[input[location]]; } } __global__ void print_hist(int *hist) { register int location =threadIdx.x; printf("pixel intensity: %d, value: %d\n",location,hist[location]); } __global__ void warmup(unsigned char *input, unsigned char *output){ int x = blockIdx.x*TILE_SIZE+threadIdx.x; int y = blockIdx.y*TILE_SIZE+threadIdx.y; int location = y*(gridDim.x*TILE_SIZE)+x; output[location] = 0; } // NOTE: The data passed on is already padded void gpu_function(unsigned char *data, unsigned int height, unsigned int width){ unsigned char *input_gpu; int *hist; unsigned int size_img = width*height; int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; int size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&hist , 256*sizeof(int))); checkCuda(cudaMemset(hist, 0 , 256*sizeof(int))); // Copy data to GPU checkCuda(cudaMemcpy(input_gpu, data, size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); // Execute algorithm int gridSize1D = 1 + (( size_img - 1) / BLOCK_SIZE_1D); dim3 dimGrid1D(gridSize1D); dim3 dimBlock1D(BLOCK_SIZE_1D); // Kernel Call #ifdef CUDA_TIMING float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif printf("here:/n"); // Add more kernels and functions as needed here kernel<<<dimGrid1D, dimBlock1D>>>(input_gpu, hist, 
size_img); //print_hist<<<1,256>>>(hist); cum_calc<<<1,256 >>>( input_gpu, hist, size_img); equalize_output<<<dimGrid1D, dimBlock1D>>>(input_gpu, hist, size_img); // From here on, no need to change anything checkCuda(cudaPeekAtLastError()); checkCuda(cudaDeviceSynchronize()); #ifdef CUDA_TIMING TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(cudaMemcpy(data, input_gpu, size*sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(input_gpu)); } void gpu_warmup(unsigned char *data, unsigned int height, unsigned int width){ unsigned char *input_gpu; unsigned char *output_gpu; int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); int XSize = gridXSize*TILE_SIZE; int YSize = gridYSize*TILE_SIZE; // Both are the same size (CPU/GPU). int size = XSize*YSize; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(cudaMemcpy(input_gpu, data, size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(TILE_SIZE, TILE_SIZE); warmup<<<dimGrid, dimBlock>>>(input_gpu, output_gpu); checkCuda(cudaDeviceSynchronize()); // Retrieve results from the GPU checkCuda(cudaMemcpy(data, output_gpu, size*sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(output_gpu)); checkCuda(cudaFree(input_gpu)); }
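// A small host-side reference of the same pipeline (histogram -> inclusive CDF ->
// lookup -> remap) can be handy for validating the GPU output. The sketch below is
// illustrative only; it mirrors the cdf * 255 / size lookup used by cum_calc and
// equalize_output, but keeps the running sum in 64 bits.
void equalize_reference_sketch(unsigned char *img, unsigned int size) {
    int hist[256] = {0};
    for (unsigned int i = 0; i < size; ++i)
        hist[img[i]]++;                                    // histogram
    int lookup[256];
    long long cdf = 0;
    for (int v = 0; v < 256; ++v) {
        cdf += hist[v];                                    // inclusive prefix sum
        lookup[v] = (int)(cdf * 255 / (long long)size);    // normalized CDF as lookup table
    }
    for (unsigned int i = 0; i < size; ++i)
        img[i] = (unsigned char)lookup[img[i]];            // remap pixels
}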
7ba9365f54c7a99b18cab6042a4ccf17602aecf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "Loss.h" #include "Loss.cuh" #include "../XDevice.h" #include "../core/math/Power.h" #include "../core/math/ScaleAndShift.h" #include "../core/math/Unary.h" #include "../core/arithmetic/Negate.h" #include "../core/arithmetic/Sum.h" #include "../core/arithmetic/Multiply.h" #include "../core/reduce/ReduceSum.h" #include "../core/movement/CopyValues.h" namespace nts{ // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* loss function to measure the "number" of errors */ /* compute the loss >> gold - gold standard >> y - model prediction >> LFName - name of loss function >> isLogOutput - is the output in log scale? >> leadDim - the leading dimension for the output >> gBeg - where to start in the gold standard (along the leading dimension) >> gLen - segment length from oBeg (along the leading dimension) >> yBeg - where to start in the model output (along the leading dimension) << return - error in model prediction with respect to gold standard */ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName, bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg) { CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!"); CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The input tensors must be of the same size!"); CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!"); CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!"); CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!"); CheckNTErrors((gold->devID == y->devID), "Tensors must be on the same device!"); CheckNTErrors((gold->devID >= 0), "Tensors must be on GPU device!"); CheckNTErrors((gLen == gold->dimSize[leadDim] && gBeg == 0 && yBeg == 0), "TODO!"); if(isLogOutput) return _LossComputeForLogScale(gold, y, LFName, leadDim, gBeg, gLen, yBeg); DTYPE error = 0.0F; /* squared error loss = sum_{i} 0.5*(gold_i - output_i)^2 where gold_i is the gold standard and output_i is the model prediction */ if(LFName == SQUAREDERROR){ XTensor * diff = NewTensor(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem); _Sum(gold, y, diff, -1.0F); _PowerMe(diff, 2.0F); _ScaleAndShiftMe(diff, 0.5F, 0.0F); int reduceTimes = diff->order; for (int i = 0; i < reduceTimes; i++) { int diffOrder = diff->order - 1; int * diffDimSize = new int[diffOrder]; memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int)); XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem); int reducePlace = diff->dimSize[0] == 1 ? 
1 : 0; _ReduceSum(diff, diffNew, reducePlace); if (diffNew->order == 1) { diffNew->order = 2; diffNew->dimSize[1] = diffNew->dimSize[0]; diffNew->dimSize[0] = 1; diffNew->dimSizeRDI[1] = 1; } delete diff; diff = diffNew; delete diffDimSize; } error = diff->Get2D(0, 0); delete diff; } /* cross entropy loss = sum_{i} (-gold_i * log(output_i)) where gold and output are distributions */ if(LFName == CROSSENTROPY){ XTensor * diff = NewTensor(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem); _CopyValues(y, diff); _LogMe(diff); _Multiply(gold, diff, diff); _NegateMe(diff); int reduceTimes = diff->order; for (int i = 0; i < reduceTimes; i++) { int diffOrder = diff->order - 1; int * diffDimSize = new int[diffOrder]; memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int)); XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem); int reducePlace = diff->dimSize[0] == 1 ? 1 : 0; _ReduceSum(diff, diffNew, reducePlace); if (diffNew->order == 1) { diffNew->order = 2; diffNew->dimSize[1] = diffNew->dimSize[0]; diffNew->dimSize[0] = 1; diffNew->dimSizeRDI[1] = 1; } delete diff; diff = diffNew; delete diffDimSize; } error = diff->Get2D(0, 0); delete diff; } /* one hot error loss = sum_{i} e_i where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise */ if(LFName == ONEHOTERROR){ XTensor * diff = NewTensor(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem); XTensor * yOnehot = NewTensor(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem); _CopyValues(y, yOnehot); _Multiply(gold, y, yOnehot); _Sum(gold, yOnehot, diff, -1.0F); _PowerMe(diff, 2.0F); _ScaleAndShiftMe(diff, 0.5F, 0.0F); int reduceTimes = diff->order; for (int i = 0; i < reduceTimes; i++) { int diffOrder = diff->order - 1; int * diffDimSize = new int[diffOrder]; memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int)); XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem); int reducePlace = diff->dimSize[0] == 1 ? 
1 : 0; _ReduceSum(diff, diffNew, reducePlace); if (diffNew->order == 1) { diffNew->order = 2; diffNew->dimSize[1] = diffNew->dimSize[0]; diffNew->dimSize[0] = 1; diffNew->dimSizeRDI[1] = 1; } delete diff; diff = diffNew; delete diffDimSize; } error = diff->Get2D(0, 0); delete diff; delete yOnehot; } return error; // TODO: call cuda kernels for computing the errors } /* the log version of loss computation >> gold - gold standard >> y - model prediction >> LFName - name of loss function >> leadDim - the leading dimension for the output >> gBeg - where to start in the gold standard (along the leading dimension) >> gLen - segment length from oBeg (along the leading dimension) >> yBeg - where to start in the model output (along the leading dimension) << return - error in model prediction with respect to gold standard */ DTYPE _CudaLossComputeForLogScale(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName, int leadDim, int gBeg, int gLen, int yBeg) { return 0; // TODO: call cuda kernels for computing the errors } /* backward compuation for a single element (Cuda version) dE/dy where E is the error(loss) function that measure the errors in y with respect to gold standard, and y this the model output >> t - gold standard >> y - model output >> LFName - name of loss function << return dE/dy */ DTYPE _CudaLossBackward(DTYPE t, DTYPE y, LOSS_FUNCTION_NAME LFName) { return _LossBackwardPoint(t, y, LFName); // TODO: call cuda kernels for computing the errors } /* backward compuation for squared error (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardSquaredError(DTYPE * dedy, DTYPE * t, DTYPE * y, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size){ dedy[i] = y[i] - t[i]; } } /* backward compuation of blocks for squared error (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> blockSize - size of a block >> begInBlock - the begining position in a block for computation >> lenInBlock - number of items in a block for computation >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardSquaredErrorBlock(DTYPE * dedy, DTYPE * t, DTYPE * y, int blockSize, int begInBlock, int lenInBlock, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int offset = i % blockSize; if(offset < begInBlock || offset >= begInBlock + lenInBlock) return; if (i < size){ dedy[i] = y[i] - t[i]; } } /* backward compuation for cross entropy (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardCrossEntropy(DTYPE * dedy, DTYPE * t, DTYPE * y, int tBeg, int tLen, int yBeg, int blockNum, int stride, int dimensionSize) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i > stride * dimensionSize * blockNum) return; int blockNumIndex = i / (stride * dimensionSize); int blockNumTail = i % (stride * dimensionSize); int dimensionSizeIndex = blockNumTail / stride; int strideIndex = blockNumTail % stride; if (dimensionSizeIndex >= tLen) return; dedy[blockNumIndex * stride * dimensionSize + strideIndex + stride * (yBeg + dimensionSizeIndex)] = -t[blockNumIndex * stride * dimensionSize + strideIndex + stride * (tBeg + dimensionSizeIndex)] / y[blockNumIndex * stride * dimensionSize + strideIndex + stride * (yBeg + dimensionSizeIndex)]; /*if (i < size){ dedy[i] = -t[i]/y[i]; }*/ } /* 
backward compuation for cross entropy (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> blockSize - size of a block >> begInBlock - the begining position in a block for computation >> lenInBlock - number of items in a block for computation >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardCrossEntropyBlock(DTYPE * dedy, DTYPE * t, DTYPE * y, int blockSize, int begInBlock, int lenInBlock, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int offset = i % blockSize; if(offset < begInBlock || offset >= begInBlock + lenInBlock) return; if (i < size){ dedy[i] = -t[i]/y[i]; } } /* backward compuation for (dense) vectors (Cuda version) dE/dy where E is the error(loss) function that measure the errors in y with respect to gold standard, and y this the model output >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> LFName - name of loss function >> leadDim - the leading dimension for the output >> tBeg - where to start in the gold standard (along the leading dimension) >> tLen - segment length from oBeg (along the leading dimension) >> yBeg - where to start in the model output (along the leading dimension) */ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y, LOSS_FUNCTION_NAME LFName, int leadDim, int tBeg, int tLen, int yBeg) { CheckNTErrors((tLen <= y->unitNum), "Illegal input length!"); CheckNTErrors((XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y)), "The input tensors must be of the same size!"); CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)), "Tensor must be on the same device!"); CheckNTErrors((t->order > leadDim), "Illegal leading dimension!"); CheckNTErrors((t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE && dedy->dataType == DEFAULT_DTYPE), "Input vectors are not in default type."); CheckNTErrors((dedy->devID >= 0 && t->devID >= 0 && y->devID >= 0), "The backward compuation must be performed on GPUs."); CheckNTErrors((dedy->devID == t->devID && dedy->devID == y->devID), "The vectors must be on the same GPU."); CheckNTErrors((tBeg == yBeg), "TODO!"); int leadDimRDI = leadDim >= 0 ? 
y->order - leadDim - 1 : -1; if(leadDimRDI < 0){ leadDimRDI = y->order - 1; tBeg = 0; yBeg = 0; tLen = y->dimSizeRDI[leadDimRDI]; } int dimensionSize = y->dimSizeRDI[leadDimRDI]; int stride = 1; int blockSize = 1; int blockNum = 1; int size = 1; for(int i = 0; i < leadDimRDI; i++) stride *= y->dimSizeRDI[i]; size = tLen * stride; blockSize = stride * dimensionSize; blockNum = y->unitNum / blockSize; int cudaGridSize[3], cudaBlockSize[3]; GDevs.GetCudaThread(dedy->devID, y->unitNum, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0]); dim3 threads(cudaBlockSize[0]); DTYPE * tp = (DTYPE*)t->data; DTYPE * yp = (DTYPE*)y->data; DTYPE * dedyp = (DTYPE*)dedy->data; int devIDBackup; ProtectCudaDev(y->devID, devIDBackup); /* squared error loss = sum_{i} 0.5*(t_i - y_i)^2, where t_i is the gold standard and y_i is the model output dloss/dy_i = y_i - t_i */ if(LFName == SQUAREDERROR){ if(t->isSparse){ ShowNTErrors("TODO!"); } else if(size == y->unitNum){ hipLaunchKernelGGL(( KernelLossBackwardSquaredError), dim3(blocks), dim3(threads), 0, 0, dedyp, tp, yp, y->unitNum); } else{ hipLaunchKernelGGL(( KernelLossBackwardSquaredErrorBlock), dim3(blocks), dim3(threads), 0, 0, dedyp, tp, yp, blockSize, tBeg * stride, tLen * stride, y->unitNum); } } /* cross entropy loss = sum_{i} (-t_i * log(y_i)), where t and y are distributions dloss/dy_i = -t_i / y_i */ else if(LFName == CROSSENTROPY){ if(t->isSparse){ ShowNTErrors("TODO!"); } else if(size == y->unitNum){ hipLaunchKernelGGL(( KernelLossBackwardCrossEntropy), dim3(blocks), dim3(threads), 0, 0, dedyp, tp, yp, tBeg, tLen, yBeg, blockNum, stride, dimensionSize); } else{ hipLaunchKernelGGL(( KernelLossBackwardCrossEntropyBlock), dim3(blocks), dim3(threads), 0, 0, dedyp, tp, yp, blockSize, tBeg * stride, tLen * stride, y->unitNum); } } else{ ShowNTErrors("TODO"); } BacktoCudaDev(y->devID, devIDBackup); } #endif } // namespace nts(NiuTrans.Tensor)
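/* The forward losses above are assembled from tensor primitives (_Sum, _PowerMe,
   _ReduceSum, ...), and the trailing TODOs mention dedicated CUDA kernels instead.
   A minimal sketch of such a kernel for the squared-error case is given below:
   each block reduces 0.5*(gold_i - y_i)^2 in shared memory and atomically adds its
   partial sum to a single accumulator. The kernel name is illustrative; it assumes
   a power-of-two blockDim.x <= 256 and that DTYPE is float so atomicAdd is available. */
__global__ void KernelSquaredErrorSumSketch(DTYPE * gold, DTYPE * y, DTYPE * sum, int size)
{
    __shared__ DTYPE partial[256];
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    DTYPE d = (i < size) ? (gold[i] - y[i]) : (DTYPE)0.0;
    partial[threadIdx.x] = (DTYPE)0.5 * d * d;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            partial[threadIdx.x] += partial[threadIdx.x + stride];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(sum, partial[0]);   // accumulate this block's contribution
}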
7ba9365f54c7a99b18cab6042a4ccf17602aecf2.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "Loss.h" #include "Loss.cuh" #include "../XDevice.h" #include "../core/math/Power.h" #include "../core/math/ScaleAndShift.h" #include "../core/math/Unary.h" #include "../core/arithmetic/Negate.h" #include "../core/arithmetic/Sum.h" #include "../core/arithmetic/Multiply.h" #include "../core/reduce/ReduceSum.h" #include "../core/movement/CopyValues.h" namespace nts{ // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* loss function to measure the "number" of errors */ /* compute the loss >> gold - gold standard >> y - model prediction >> LFName - name of loss function >> isLogOutput - is the output in log scale? >> leadDim - the leading dimension for the output >> gBeg - where to start in the gold standard (along the leading dimension) >> gLen - segment length from oBeg (along the leading dimension) >> yBeg - where to start in the model output (along the leading dimension) << return - error in model prediction with respect to gold standard */ DTYPE _CudaLossCompute(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName, bool isLogOutput, int leadDim, int gBeg, int gLen, int yBeg) { CheckNTErrors((gLen >= 0 && gLen <= y->unitNum), "Illegal input length!"); CheckNTErrors((XTensor::IsSameShaped(gold, y)), "The input tensors must be of the same size!"); CheckNTErrors((gold->dimSizeRDI[0] == 1 && y->dimSizeRDI[0] == 1), "TODO!"); CheckNTErrors((gold->order > leadDim && leadDim >= 0), "Illegal leading dimension!"); CheckNTErrors((gold->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE), "TODO!"); CheckNTErrors((gold->devID == y->devID), "Tensors must be on the same device!"); CheckNTErrors((gold->devID >= 0), "Tensors must be on GPU device!"); CheckNTErrors((gLen == gold->dimSize[leadDim] && gBeg == 0 && yBeg == 0), "TODO!"); if(isLogOutput) return _LossComputeForLogScale(gold, y, LFName, leadDim, gBeg, gLen, yBeg); DTYPE error = 0.0F; /* squared error loss = sum_{i} 0.5*(gold_i - output_i)^2 where gold_i is the gold standard and output_i is the model prediction */ if(LFName == SQUAREDERROR){ XTensor * diff = NewTensor(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem); _Sum(gold, y, diff, -1.0F); _PowerMe(diff, 2.0F); _ScaleAndShiftMe(diff, 0.5F, 0.0F); int reduceTimes = diff->order; for (int i = 0; i < reduceTimes; i++) { int diffOrder = diff->order - 1; int * diffDimSize = new int[diffOrder]; memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int)); XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem); int reducePlace = diff->dimSize[0] == 1 ? 
1 : 0; _ReduceSum(diff, diffNew, reducePlace); if (diffNew->order == 1) { diffNew->order = 2; diffNew->dimSize[1] = diffNew->dimSize[0]; diffNew->dimSize[0] = 1; diffNew->dimSizeRDI[1] = 1; } delete diff; diff = diffNew; delete diffDimSize; } error = diff->Get2D(0, 0); delete diff; } /* cross entropy loss = sum_{i} (-gold_i * log(output_i)) where gold and output are distributions */ if(LFName == CROSSENTROPY){ XTensor * diff = NewTensor(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem); _CopyValues(y, diff); _LogMe(diff); _Multiply(gold, diff, diff); _NegateMe(diff); int reduceTimes = diff->order; for (int i = 0; i < reduceTimes; i++) { int diffOrder = diff->order - 1; int * diffDimSize = new int[diffOrder]; memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int)); XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem); int reducePlace = diff->dimSize[0] == 1 ? 1 : 0; _ReduceSum(diff, diffNew, reducePlace); if (diffNew->order == 1) { diffNew->order = 2; diffNew->dimSize[1] = diffNew->dimSize[0]; diffNew->dimSize[0] = 1; diffNew->dimSizeRDI[1] = 1; } delete diff; diff = diffNew; delete diffDimSize; } error = diff->Get2D(0, 0); delete diff; } /* one hot error loss = sum_{i} e_i where e_i = 0.5*(t_i - y_i)^2 if t_i = 1, e_i = 0 otherwise */ if(LFName == ONEHOTERROR){ XTensor * diff = NewTensor(gold->order, gold->dimSize, gold->dataType, gold->denseRatio, gold->devID, gold->mem); XTensor * yOnehot = NewTensor(y->order, y->dimSize, y->dataType, y->denseRatio, y->devID, y->mem); _CopyValues(y, yOnehot); _Multiply(gold, y, yOnehot); _Sum(gold, yOnehot, diff, -1.0F); _PowerMe(diff, 2.0F); _ScaleAndShiftMe(diff, 0.5F, 0.0F); int reduceTimes = diff->order; for (int i = 0; i < reduceTimes; i++) { int diffOrder = diff->order - 1; int * diffDimSize = new int[diffOrder]; memcpy(diffDimSize, diff->dimSize + 1, diffOrder * sizeof(int)); XTensor * diffNew = NewTensor(diffOrder, diffDimSize, X_FLOAT, 1.0F, diff->devID, diff->mem); int reducePlace = diff->dimSize[0] == 1 ? 
1 : 0; _ReduceSum(diff, diffNew, reducePlace); if (diffNew->order == 1) { diffNew->order = 2; diffNew->dimSize[1] = diffNew->dimSize[0]; diffNew->dimSize[0] = 1; diffNew->dimSizeRDI[1] = 1; } delete diff; diff = diffNew; delete diffDimSize; } error = diff->Get2D(0, 0); delete diff; delete yOnehot; } return error; // TODO: call cuda kernels for computing the errors } /* the log version of loss computation >> gold - gold standard >> y - model prediction >> LFName - name of loss function >> leadDim - the leading dimension for the output >> gBeg - where to start in the gold standard (along the leading dimension) >> gLen - segment length from oBeg (along the leading dimension) >> yBeg - where to start in the model output (along the leading dimension) << return - error in model prediction with respect to gold standard */ DTYPE _CudaLossComputeForLogScale(XTensor * gold, XTensor * y, LOSS_FUNCTION_NAME LFName, int leadDim, int gBeg, int gLen, int yBeg) { return 0; // TODO: call cuda kernels for computing the errors } /* backward compuation for a single element (Cuda version) dE/dy where E is the error(loss) function that measure the errors in y with respect to gold standard, and y this the model output >> t - gold standard >> y - model output >> LFName - name of loss function << return dE/dy */ DTYPE _CudaLossBackward(DTYPE t, DTYPE y, LOSS_FUNCTION_NAME LFName) { return _LossBackwardPoint(t, y, LFName); // TODO: call cuda kernels for computing the errors } /* backward compuation for squared error (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardSquaredError(DTYPE * dedy, DTYPE * t, DTYPE * y, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size){ dedy[i] = y[i] - t[i]; } } /* backward compuation of blocks for squared error (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> blockSize - size of a block >> begInBlock - the begining position in a block for computation >> lenInBlock - number of items in a block for computation >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardSquaredErrorBlock(DTYPE * dedy, DTYPE * t, DTYPE * y, int blockSize, int begInBlock, int lenInBlock, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int offset = i % blockSize; if(offset < begInBlock || offset >= begInBlock + lenInBlock) return; if (i < size){ dedy[i] = y[i] - t[i]; } } /* backward compuation for cross entropy (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardCrossEntropy(DTYPE * dedy, DTYPE * t, DTYPE * y, int tBeg, int tLen, int yBeg, int blockNum, int stride, int dimensionSize) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i > stride * dimensionSize * blockNum) return; int blockNumIndex = i / (stride * dimensionSize); int blockNumTail = i % (stride * dimensionSize); int dimensionSizeIndex = blockNumTail / stride; int strideIndex = blockNumTail % stride; if (dimensionSizeIndex >= tLen) return; dedy[blockNumIndex * stride * dimensionSize + strideIndex + stride * (yBeg + dimensionSizeIndex)] = -t[blockNumIndex * stride * dimensionSize + strideIndex + stride * (tBeg + dimensionSizeIndex)] / y[blockNumIndex * stride * dimensionSize + strideIndex + stride * (yBeg + dimensionSizeIndex)]; /*if (i < size){ dedy[i] = -t[i]/y[i]; }*/ } /* 
backward compuation for cross entropy (Cuda kernel) >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> blockSize - size of a block >> begInBlock - the begining position in a block for computation >> lenInBlock - number of items in a block for computation >> size - size of the vector (dedy) */ __global__ void KernelLossBackwardCrossEntropyBlock(DTYPE * dedy, DTYPE * t, DTYPE * y, int blockSize, int begInBlock, int lenInBlock, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; int offset = i % blockSize; if(offset < begInBlock || offset >= begInBlock + lenInBlock) return; if (i < size){ dedy[i] = -t[i]/y[i]; } } /* backward compuation for (dense) vectors (Cuda version) dE/dy where E is the error(loss) function that measure the errors in y with respect to gold standard, and y this the model output >> dedy - dE/dy (for return) >> t - gold standard (in vector) >> y - model output (in vector) >> LFName - name of loss function >> leadDim - the leading dimension for the output >> tBeg - where to start in the gold standard (along the leading dimension) >> tLen - segment length from oBeg (along the leading dimension) >> yBeg - where to start in the model output (along the leading dimension) */ void _CudaLossBackward(XTensor * dedy, XTensor * t, XTensor * y, LOSS_FUNCTION_NAME LFName, int leadDim, int tBeg, int tLen, int yBeg) { CheckNTErrors((tLen <= y->unitNum), "Illegal input length!"); CheckNTErrors((XTensor::IsSameShaped(t, y)&& XTensor::IsSameShaped(dedy, y)), "The input tensors must be of the same size!"); CheckNTErrors(((dedy->devID == t->devID) && (dedy->devID == y->devID)), "Tensor must be on the same device!"); CheckNTErrors((t->order > leadDim), "Illegal leading dimension!"); CheckNTErrors((t->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE && dedy->dataType == DEFAULT_DTYPE), "Input vectors are not in default type."); CheckNTErrors((dedy->devID >= 0 && t->devID >= 0 && y->devID >= 0), "The backward compuation must be performed on GPUs."); CheckNTErrors((dedy->devID == t->devID && dedy->devID == y->devID), "The vectors must be on the same GPU."); CheckNTErrors((tBeg == yBeg), "TODO!"); int leadDimRDI = leadDim >= 0 ? 
y->order - leadDim - 1 : -1; if(leadDimRDI < 0){ leadDimRDI = y->order - 1; tBeg = 0; yBeg = 0; tLen = y->dimSizeRDI[leadDimRDI]; } int dimensionSize = y->dimSizeRDI[leadDimRDI]; int stride = 1; int blockSize = 1; int blockNum = 1; int size = 1; for(int i = 0; i < leadDimRDI; i++) stride *= y->dimSizeRDI[i]; size = tLen * stride; blockSize = stride * dimensionSize; blockNum = y->unitNum / blockSize; int cudaGridSize[3], cudaBlockSize[3]; GDevs.GetCudaThread(dedy->devID, y->unitNum, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[0]); dim3 threads(cudaBlockSize[0]); DTYPE * tp = (DTYPE*)t->data; DTYPE * yp = (DTYPE*)y->data; DTYPE * dedyp = (DTYPE*)dedy->data; int devIDBackup; ProtectCudaDev(y->devID, devIDBackup); /* squared error loss = sum_{i} 0.5*(t_i - y_i)^2, where t_i is the gold standard and y_i is the model output dloss/dy_i = y_i - t_i */ if(LFName == SQUAREDERROR){ if(t->isSparse){ ShowNTErrors("TODO!"); } else if(size == y->unitNum){ KernelLossBackwardSquaredError<<<blocks, threads>>>(dedyp, tp, yp, y->unitNum); } else{ KernelLossBackwardSquaredErrorBlock<<<blocks, threads>>>(dedyp, tp, yp, blockSize, tBeg * stride, tLen * stride, y->unitNum); } } /* cross entropy loss = sum_{i} (-t_i * log(y_i)), where t and y are distributions dloss/dy_i = -t_i / y_i */ else if(LFName == CROSSENTROPY){ if(t->isSparse){ ShowNTErrors("TODO!"); } else if(size == y->unitNum){ KernelLossBackwardCrossEntropy<<<blocks, threads>>>(dedyp, tp, yp, tBeg, tLen, yBeg, blockNum, stride, dimensionSize); } else{ KernelLossBackwardCrossEntropyBlock<<<blocks, threads>>>(dedyp, tp, yp, blockSize, tBeg * stride, tLen * stride, y->unitNum); } } else{ ShowNTErrors("TODO"); } BacktoCudaDev(y->devID, devIDBackup); } #endif } // namespace nts(NiuTrans.Tensor)
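/* GDevs.GetCudaThread() used above is defined outside this excerpt. For the 1-D
   launches in this file it presumably reduces to the usual ceiling-division
   configuration sketched below (the function name and the fixed block size are
   illustrative assumptions, not the library's actual logic). */
inline void GetCudaThreadSketch(int num, int gridSize[3], int blockSize[3], int maxThreadsPerBlock = 256)
{
    blockSize[0] = maxThreadsPerBlock;                       // threads per block, device dependent
    gridSize[0]  = (num + blockSize[0] - 1) / blockSize[0];  // enough blocks to cover num elements
    gridSize[1]  = gridSize[2] = 1;
    blockSize[1] = blockSize[2] = 1;
}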
92636b5d1f5ead624ceb66fa3d2454f835d6e2ee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif

__global__ void elementwise_1D_1D_exp(float* in, float* out, int size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < size; tid += stride)
    if (tid < size)
      out[tid] = exp(in[tid]);
}
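// A minimal sketch of how this kernel is typically launched from host code; the
// wrapper name and block size are illustrative. Because the kernel uses a
// grid-stride loop, any positive block count covers the whole array, so the
// ceiling division below is just the common choice.
void launch_exp_sketch(float* d_in, float* d_out, int size) {
  const int threads = 256;
  const int blocks = (size + threads - 1) / threads;
  hipLaunchKernelGGL(elementwise_1D_1D_exp, dim3(blocks), dim3(threads), 0, 0,
                     d_in, d_out, size);
}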
92636b5d1f5ead624ceb66fa3d2454f835d6e2ee.cu
#include "includes.h"

using namespace std;

#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif

__global__ void elementwise_1D_1D_exp(float* in, float* out, int size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < size; tid += stride)
    if (tid < size)
      out[tid] = exp(in[tid]);
}
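// A small host-side check of the kernel against expf(), useful when porting;
// everything here is illustrative (device 0, default stream, no error handling).
#include <cmath>
#include <cstdio>
void check_exp_sketch(int size) {
  float *h_in = new float[size], *h_out = new float[size];
  for (int i = 0; i < size; ++i) h_in[i] = 0.001f * i;
  float *d_in, *d_out;
  cudaMalloc((void**)&d_in, size * sizeof(float));
  cudaMalloc((void**)&d_out, size * sizeof(float));
  cudaMemcpy(d_in, h_in, size * sizeof(float), cudaMemcpyHostToDevice);
  elementwise_1D_1D_exp<<<(size + 255) / 256, 256>>>(d_in, d_out, size);
  cudaMemcpy(h_out, d_out, size * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < size; ++i)
    if (fabsf(h_out[i] - expf(h_in[i])) > 1e-5f) { printf("mismatch at %d\n", i); break; }
  cudaFree(d_in); cudaFree(d_out);
  delete[] h_in; delete[] h_out;
}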
b2789284f83b8d98aa2f1418e6c79a6d27d12ab8.hip
// !!! This is a file automatically generated by hipify!!! //====================================== // // GPU //====================================== #include"stdafx.h" #include"ExponentialNormalization_LayerData_GPU.cuh" #include"ExponentialNormalization_FUNC.hpp" #include"ExponentialNormalization_GPU.cuh" #include"Library/NeuralNetwork/Optimizer.h" #include"../_LayerBase/CLayerBase_GPU.cuh" using namespace Gravisbell; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { //=========================== // / //=========================== /** */ ExponentialNormalization_LayerData_GPU::ExponentialNormalization_LayerData_GPU(const Gravisbell::GUID& guid) : ExponentialNormalization_LayerData_Base(guid) { } /** */ ExponentialNormalization_LayerData_GPU::~ExponentialNormalization_LayerData_GPU() { } //=========================== // //=========================== /** . @return 0 */ ErrorCode ExponentialNormalization_LayerData_GPU::Initialize(void) { this->lpMean.resize(this->layerStructure.InputChannelCount); this->lpVariance.resize(this->layerStructure.InputChannelCount); for(U32 ch=0; ch<this->layerStructure.InputChannelCount; ch++) { this->lpMean[ch] = 0.0f; this->lpVariance[ch] = 1.0f; } this->learnTime = 0; /**< */ return ErrorCode::ERROR_CODE_NONE; } /** . @param i_config @oaram i_inputDataStruct @return 0 */ ErrorCode ExponentialNormalization_LayerData_GPU::Initialize(const SettingData::Standard::IData& i_data) { ErrorCode err; // err = this->SetLayerConfig(i_data); if(err != ErrorCode::ERROR_CODE_NONE) return err; // err = this->Initialize(); if(err != ErrorCode::ERROR_CODE_NONE) return err; // err = this->ChangeOptimizer(L"SGD"); if(err != ErrorCode::ERROR_CODE_NONE) return err; return ErrorCode::ERROR_CODE_NONE; } /** . @param i_lpBuffer . @param i_bufferSize . @return 0 */ ErrorCode ExponentialNormalization_LayerData_GPU::InitializeFromBuffer(const BYTE* i_lpBuffer, U64 i_bufferSize, S64& o_useBufferSize ) { S64 readBufferByte = 0; // S64 useBufferByte = 0; SettingData::Standard::IData* pLayerStructure = CreateLayerStructureSettingFromBuffer(&i_lpBuffer[readBufferByte], i_bufferSize, useBufferByte); if(pLayerStructure == NULL) return ErrorCode::ERROR_CODE_INITLAYER_READ_CONFIG; readBufferByte += useBufferByte; this->SetLayerConfig(*pLayerStructure); delete pLayerStructure; // this->Initialize(); // hipMemcpy(thrust::raw_pointer_cast(&this->lpMean[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpMean.size(), hipMemcpyHostToDevice); readBufferByte += sizeof(F32)*(U32)this->lpMean.size(); // hipMemcpy(thrust::raw_pointer_cast(&this->lpVariance[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpMean.size(), hipMemcpyHostToDevice); readBufferByte += sizeof(F32)*(U32)this->lpMean.size(); // memcpy(&this->learnTime, &i_lpBuffer[readBufferByte], sizeof(this->learnTime)); readBufferByte += sizeof(this->learnTime); o_useBufferSize = readBufferByte; return ErrorCode::ERROR_CODE_NONE; } //=========================== // //=========================== /** . @param o_lpBuffer . GetUseBufferByteCount @return . 
*/ S64 ExponentialNormalization_LayerData_GPU::WriteToBuffer(BYTE* o_lpBuffer)const { if(this->pLayerStructure == NULL) return ErrorCode::ERROR_CODE_NONREGIST_CONFIG; S64 writeBufferByte = 0; // writeBufferByte += this->pLayerStructure->WriteToBuffer(&o_lpBuffer[writeBufferByte]); // hipMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpMean[0]), sizeof(F32)*this->lpMean.size(), hipMemcpyDeviceToHost); writeBufferByte += sizeof(F32)*(U32)this->lpMean.size(); // hipMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpVariance[0]), sizeof(F32)*this->lpVariance.size(), hipMemcpyDeviceToHost); writeBufferByte += sizeof(F32)*(U32)this->lpVariance.size(); // memcpy(&o_lpBuffer[writeBufferByte], &this->learnTime, sizeof(this->learnTime)); writeBufferByte += sizeof(U64); return writeBufferByte; } //=========================== // //=========================== /** . @param guid GUID. */ ILayerBase* ExponentialNormalization_LayerData_GPU::CreateLayer(const Gravisbell::GUID& guid, const IODataStruct i_lpInputDataStruct[], U32 i_inputLayerCount, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) { if(this->CheckCanUseInputDataStruct(i_lpInputDataStruct, i_inputLayerCount) == false) return NULL; return new CNNSingle2SingleLayerBase_GPU<ExponentialNormalization_GPU, ExponentialNormalization_LayerData_GPU>(guid, *this, i_lpInputDataStruct[0], i_temporaryMemoryManager); } //=========================== // //=========================== /** */ ErrorCode ExponentialNormalization_LayerData_GPU::ChangeOptimizer(const wchar_t i_optimizerID[]) { return ErrorCode::ERROR_CODE_NONE; } } // Gravisbell; } // Layer; } // NeuralNetwork; /** Create a layer for GPU processing. * @param GUID of layer to create. */ EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPU(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const Gravisbell::SettingData::Standard::IData& i_data) { // Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU(guid); if(pLayerData == NULL) return NULL; // Gravisbell::ErrorCode errCode = pLayerData->Initialize(i_data); if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE) { delete pLayerData; return NULL; } return pLayerData; } EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPUfromBuffer(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const BYTE* i_lpBuffer, S64 i_bufferSize, S64& o_useBufferSize) { // Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU(guid); if(pLayerData == NULL) return NULL; // S64 useBufferSize = 0; Gravisbell::ErrorCode errCode = pLayerData->InitializeFromBuffer(i_lpBuffer, i_bufferSize, useBufferSize); if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE) { delete pLayerData; return NULL; } // o_useBufferSize = useBufferSize; return pLayerData; }
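// The buffers handled by InitializeFromBuffer()/WriteToBuffer() above share one
// fixed layout: [layer-structure settings][mean: F32 x channels][variance: F32 x
// channels][learnTime: U64]. As a sketch, the total byte count for a given
// settings size is therefore (function name is illustrative; the class's real
// GetUseBufferByteCount() is not shown in this excerpt):
inline S64 SerializedByteCountSketch(S64 settingBytes, U32 channelCount)
{
    return settingBytes + 2 * (S64)channelCount * sizeof(F32) + sizeof(U64);
}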
b2789284f83b8d98aa2f1418e6c79a6d27d12ab8.cu
//====================================== // バッチ正規化のレイヤーデータ // GPU制御 //====================================== #include"stdafx.h" #include"ExponentialNormalization_LayerData_GPU.cuh" #include"ExponentialNormalization_FUNC.hpp" #include"ExponentialNormalization_GPU.cuh" #include"Library/NeuralNetwork/Optimizer.h" #include"../_LayerBase/CLayerBase_GPU.cuh" using namespace Gravisbell; namespace Gravisbell { namespace Layer { namespace NeuralNetwork { //=========================== // コンストラクタ / デストラクタ //=========================== /** コンストラクタ */ ExponentialNormalization_LayerData_GPU::ExponentialNormalization_LayerData_GPU(const Gravisbell::GUID& guid) : ExponentialNormalization_LayerData_Base(guid) { } /** デストラクタ */ ExponentialNormalization_LayerData_GPU::~ExponentialNormalization_LayerData_GPU() { } //=========================== // 初期化 //=========================== /** 初期化. 各ニューロンの値をランダムに初期化 @return 成功した場合0 */ ErrorCode ExponentialNormalization_LayerData_GPU::Initialize(void) { this->lpMean.resize(this->layerStructure.InputChannelCount); this->lpVariance.resize(this->layerStructure.InputChannelCount); for(U32 ch=0; ch<this->layerStructure.InputChannelCount; ch++) { this->lpMean[ch] = 0.0f; this->lpVariance[ch] = 1.0f; } this->learnTime = 0; /**< 学習回数 */ return ErrorCode::ERROR_CODE_NONE; } /** 初期化. 各ニューロンの値をランダムに初期化 @param i_config 設定情報 @oaram i_inputDataStruct 入力データ構造情報 @return 成功した場合0 */ ErrorCode ExponentialNormalization_LayerData_GPU::Initialize(const SettingData::Standard::IData& i_data) { ErrorCode err; // 設定情報の登録 err = this->SetLayerConfig(i_data); if(err != ErrorCode::ERROR_CODE_NONE) return err; // 初期化 err = this->Initialize(); if(err != ErrorCode::ERROR_CODE_NONE) return err; // オプティマイザーの設定 err = this->ChangeOptimizer(L"SGD"); if(err != ErrorCode::ERROR_CODE_NONE) return err; return ErrorCode::ERROR_CODE_NONE; } /** 初期化. バッファからデータを読み込む @param i_lpBuffer 読み込みバッファの先頭アドレス. @param i_bufferSize 読み込み可能バッファのサイズ. @return 成功した場合0 */ ErrorCode ExponentialNormalization_LayerData_GPU::InitializeFromBuffer(const BYTE* i_lpBuffer, U64 i_bufferSize, S64& o_useBufferSize ) { S64 readBufferByte = 0; // 設定情報 S64 useBufferByte = 0; SettingData::Standard::IData* pLayerStructure = CreateLayerStructureSettingFromBuffer(&i_lpBuffer[readBufferByte], i_bufferSize, useBufferByte); if(pLayerStructure == NULL) return ErrorCode::ERROR_CODE_INITLAYER_READ_CONFIG; readBufferByte += useBufferByte; this->SetLayerConfig(*pLayerStructure); delete pLayerStructure; // 初期化する this->Initialize(); // 平均 cudaMemcpy(thrust::raw_pointer_cast(&this->lpMean[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpMean.size(), cudaMemcpyHostToDevice); readBufferByte += sizeof(F32)*(U32)this->lpMean.size(); // 分散 cudaMemcpy(thrust::raw_pointer_cast(&this->lpVariance[0]), &i_lpBuffer[readBufferByte], sizeof(F32)*this->lpMean.size(), cudaMemcpyHostToDevice); readBufferByte += sizeof(F32)*(U32)this->lpMean.size(); // 学習回数 memcpy(&this->learnTime, &i_lpBuffer[readBufferByte], sizeof(this->learnTime)); readBufferByte += sizeof(this->learnTime); o_useBufferSize = readBufferByte; return ErrorCode::ERROR_CODE_NONE; } //=========================== // レイヤー保存 //=========================== /** レイヤーをバッファに書き込む. @param o_lpBuffer 書き込み先バッファの先頭アドレス. 
GetUseBufferByteCountの戻り値のバイト数が必要 @return 成功した場合書き込んだバッファサイズ.失敗した場合は負の値 */ S64 ExponentialNormalization_LayerData_GPU::WriteToBuffer(BYTE* o_lpBuffer)const { if(this->pLayerStructure == NULL) return ErrorCode::ERROR_CODE_NONREGIST_CONFIG; S64 writeBufferByte = 0; // 設定情報 writeBufferByte += this->pLayerStructure->WriteToBuffer(&o_lpBuffer[writeBufferByte]); // 平均 cudaMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpMean[0]), sizeof(F32)*this->lpMean.size(), cudaMemcpyDeviceToHost); writeBufferByte += sizeof(F32)*(U32)this->lpMean.size(); // 分散 cudaMemcpy(&o_lpBuffer[writeBufferByte], thrust::raw_pointer_cast(&this->lpVariance[0]), sizeof(F32)*this->lpVariance.size(), cudaMemcpyDeviceToHost); writeBufferByte += sizeof(F32)*(U32)this->lpVariance.size(); // 学習回数 memcpy(&o_lpBuffer[writeBufferByte], &this->learnTime, sizeof(this->learnTime)); writeBufferByte += sizeof(U64); return writeBufferByte; } //=========================== // レイヤー作成 //=========================== /** レイヤーを作成する. @param guid 新規生成するレイヤーのGUID. */ ILayerBase* ExponentialNormalization_LayerData_GPU::CreateLayer(const Gravisbell::GUID& guid, const IODataStruct i_lpInputDataStruct[], U32 i_inputLayerCount, Gravisbell::Common::ITemporaryMemoryManager& i_temporaryMemoryManager) { if(this->CheckCanUseInputDataStruct(i_lpInputDataStruct, i_inputLayerCount) == false) return NULL; return new CNNSingle2SingleLayerBase_GPU<ExponentialNormalization_GPU, ExponentialNormalization_LayerData_GPU>(guid, *this, i_lpInputDataStruct[0], i_temporaryMemoryManager); } //=========================== // オプティマイザー設定 //=========================== /** オプティマイザーを変更する */ ErrorCode ExponentialNormalization_LayerData_GPU::ChangeOptimizer(const wchar_t i_optimizerID[]) { return ErrorCode::ERROR_CODE_NONE; } } // Gravisbell; } // Layer; } // NeuralNetwork; /** Create a layer for GPU processing. * @param GUID of layer to create. */ EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPU(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const Gravisbell::SettingData::Standard::IData& i_data) { // 作成 Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU(guid); if(pLayerData == NULL) return NULL; // 初期化 Gravisbell::ErrorCode errCode = pLayerData->Initialize(i_data); if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE) { delete pLayerData; return NULL; } return pLayerData; } EXPORT_API Gravisbell::Layer::ILayerData* CreateLayerDataGPUfromBuffer(const Gravisbell::Layer::NeuralNetwork::ILayerDLLManager* pLayerDLLManager, Gravisbell::GUID guid, const BYTE* i_lpBuffer, S64 i_bufferSize, S64& o_useBufferSize) { // 作成 Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU* pLayerData = new Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU(guid); if(pLayerData == NULL) return NULL; // 初期化 S64 useBufferSize = 0; Gravisbell::ErrorCode errCode = pLayerData->InitializeFromBuffer(i_lpBuffer, i_bufferSize, useBufferSize); if(errCode != Gravisbell::ErrorCode::ERROR_CODE_NONE) { delete pLayerData; return NULL; } // 使用したバッファ量を格納 o_useBufferSize = useBufferSize; return pLayerData; }
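// An illustrative round trip through the two buffer APIs above: serialize one
// layer-data object and rebuild another from the bytes. The GetUseBufferByteCount()
// call is assumed from the comment on WriteToBuffer(); error handling is omitted.
#include <vector>
inline void RoundTripSketch(Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU &src,
                            Gravisbell::Layer::NeuralNetwork::ExponentialNormalization_LayerData_GPU &dst)
{
    std::vector<BYTE> buf((size_t)src.GetUseBufferByteCount());   // size per the WriteToBuffer comment
    S64 written = src.WriteToBuffer(buf.data());
    S64 used = 0;
    dst.InitializeFromBuffer(buf.data(), (U64)written, used);     // should consume the same bytes
}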
22ee3446bc1cb2cf02cc3953e5be70fcf4e9f42a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "glm/ols.h" #include <vector> #include <gtest/gtest.h> #include <cuda_utils.h> #include <test_utils.h> #include "ml_utils.h" namespace ML { namespace GLM { using namespace MLCommon; template<typename T> struct OlsInputs { T tol; int n_row; int n_col; int n_row_2; int algo; }; template<typename T> class OlsTest: public ::testing::TestWithParam<OlsInputs<T> > { protected: void basicTest() { params = ::testing::TestWithParam<OlsInputs<T>>::GetParam(); int len = params.n_row * params.n_col; int len2 = params.n_row_2 * params.n_col; hipblasHandle_t cublas_handle; CUBLAS_CHECK(hipblasCreate(&cublas_handle)); hipsolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle)); allocate(data, len); allocate(labels, params.n_row); allocate(coef, params.n_col); allocate(coef2, params.n_col); allocate(coef3, params.n_col); allocate(coef_ref, params.n_col); allocate(coef2_ref, params.n_col); allocate(coef3_ref, params.n_col); allocate(pred_data, len2); allocate(pred, params.n_row_2); allocate(pred_ref, params.n_row_2); allocate(pred2, params.n_row_2); allocate(pred2_ref, params.n_row_2); allocate(pred3, params.n_row_2); allocate(pred3_ref, params.n_row_2); std::vector<T> data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; data_h.resize(len); updateDevice(data, data_h.data(), len); std::vector<T> labels_h = {6.0, 8.0, 9.0, 11.0}; labels_h.resize(params.n_row); updateDevice(labels, labels_h.data(), params.n_row); std::vector<T> coef_ref_h = {2.090908, 2.5454557}; coef_ref_h.resize(params.n_col); updateDevice(coef_ref, coef_ref_h.data(), params.n_col); std::vector<T> coef2_ref_h = {1.000001 , 1.9999998}; coef2_ref_h.resize(params.n_col); updateDevice(coef2_ref, coef2_ref_h.data(), params.n_col); std::vector<T> coef3_ref_h = {0.99999 , 2.00000}; coef3_ref_h.resize(params.n_col); updateDevice(coef3_ref, coef3_ref_h.data(), params.n_col); std::vector<T> pred_data_h = {3.0, 2.0, 5.0, 5.0}; pred_data_h.resize(len2); updateDevice(pred_data, pred_data_h.data(), len2); std::vector<T> pred_ref_h = {19.0, 16.9090}; pred_ref_h.resize(params.n_row_2); updateDevice(pred_ref, pred_ref_h.data(), params.n_row_2); std::vector<T> pred2_ref_h = {16.0, 15.0}; pred2_ref_h.resize(params.n_row_2); updateDevice(pred2_ref, pred2_ref_h.data(), params.n_row_2); std::vector<T> pred3_ref_h = {16.0, 15.0}; pred3_ref_h.resize(params.n_row_2); updateDevice(pred3_ref, pred3_ref_h.data(), params.n_row_2); intercept = T(0); olsFit(data, params.n_row, params.n_col, labels, coef, &intercept, false, false, cublas_handle, cusolver_handle, params.algo); olsPredict(pred_data, params.n_row_2, params.n_col, coef, intercept, pred, cublas_handle); updateDevice(data, data_h.data(), len); updateDevice(labels, labels_h.data(), params.n_row); intercept2 = T(0); olsFit(data, params.n_row, params.n_col, labels, coef2, &intercept2, true, false, cublas_handle, cusolver_handle, 
params.algo); olsPredict(pred_data, params.n_row_2, params.n_col, coef2, intercept2, pred2, cublas_handle); updateDevice(data, data_h.data(), len); updateDevice(labels, labels_h.data(), params.n_row); intercept3 = T(0); olsFit(data, params.n_row, params.n_col, labels, coef3, &intercept3, true, true, cublas_handle, cusolver_handle, params.algo); olsPredict(pred_data, params.n_row_2, params.n_col, coef3, intercept3, pred3, cublas_handle); CUBLAS_CHECK(hipblasDestroy(cublas_handle)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle)); } void basicTest2() { params = ::testing::TestWithParam<OlsInputs<T>>::GetParam(); int len = params.n_row * params.n_col; hipblasHandle_t cublas_handle; CUBLAS_CHECK(hipblasCreate(&cublas_handle)); hipsolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle)); allocate(data_sc, len); allocate(labels_sc, len); allocate(coef_sc, 1); allocate(coef_sc_ref, 1); std::vector<T> data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; data_h.resize(len); updateDevice(data_sc, data_h.data(), len); std::vector<T> labels_h = {6.0, 8.0, 9.0, 11.0, -1.0, 2.0, -3.6, 3.3}; labels_h.resize(len); updateDevice(labels_sc, labels_h.data(), len); std::vector<T> coef_sc_ref_h = {-0.29285714}; coef_sc_ref_h.resize(1); updateDevice(coef_sc_ref, coef_sc_ref_h.data(), 1); T intercept_sc = T(0); olsFit(data_sc, len, 1, labels_sc, coef_sc, &intercept_sc, true, false, cublas_handle, cusolver_handle, params.algo); CUBLAS_CHECK(hipblasDestroy(cublas_handle)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle)); } void SetUp() override { basicTest(); basicTest2(); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(coef)); CUDA_CHECK(hipFree(coef_ref)); CUDA_CHECK(hipFree(coef2)); CUDA_CHECK(hipFree(coef2_ref)); CUDA_CHECK(hipFree(coef3)); CUDA_CHECK(hipFree(coef3_ref)); CUDA_CHECK(hipFree(pred_data)); CUDA_CHECK(hipFree(pred)); CUDA_CHECK(hipFree(pred_ref)); CUDA_CHECK(hipFree(pred2)); CUDA_CHECK(hipFree(pred2_ref)); CUDA_CHECK(hipFree(pred3)); CUDA_CHECK(hipFree(pred3_ref)); CUDA_CHECK(hipFree(data_sc)); CUDA_CHECK(hipFree(labels_sc)); CUDA_CHECK(hipFree(coef_sc)); CUDA_CHECK(hipFree(coef_sc_ref)); } protected: OlsInputs<T> params; T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref; T *coef2, *coef2_ref, *pred2, *pred2_ref; T *coef3, *coef3_ref, *pred3, *pred3_ref; T *data_sc, *labels_sc, *coef_sc, *coef_sc_ref; T intercept, intercept2, intercept3; }; const std::vector<OlsInputs<float> > inputsf2 = { { 0.001f, 4, 2, 2, 0 }, { 0.001f, 4, 2, 2, 1 }, { 0.001f, 4, 2, 2, 2 } }; const std::vector<OlsInputs<double> > inputsd2 = { { 0.001, 4, 2, 2, 0 }, { 0.001, 4, 2, 2, 1 }, { 0.001, 4, 2, 2, 2 } }; typedef OlsTest<float> OlsTestF; TEST_P(OlsTestF, Fit) { ASSERT_TRUE( devArrMatch(coef_ref, coef, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(coef3_ref, coef3, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(pred_ref, pred, params.n_row_2, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(pred2_ref, pred2, params.n_row_2, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(pred3_ref, pred3, params.n_row_2, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(coef_sc_ref, coef_sc, 1, CompareApproxAbs<float>(params.tol))); } typedef OlsTest<double> OlsTestD; TEST_P(OlsTestD, Fit) { ASSERT_TRUE( devArrMatch(coef_ref, 
coef, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(coef3_ref, coef3, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(pred_ref, pred, params.n_row_2, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(pred2_ref, pred2, params.n_row_2, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(pred3_ref, pred3, params.n_row_2, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(coef_sc_ref, coef_sc, 1, CompareApproxAbs<double>(params.tol))); } INSTANTIATE_TEST_CASE_P(OlsTests, OlsTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(OlsTests, OlsTestD, ::testing::ValuesIn(inputsd2)); } } // end namespace ML
22ee3446bc1cb2cf02cc3953e5be70fcf4e9f42a.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "glm/ols.h" #include <vector> #include <gtest/gtest.h> #include <cuda_utils.h> #include <test_utils.h> #include "ml_utils.h" namespace ML { namespace GLM { using namespace MLCommon; template<typename T> struct OlsInputs { T tol; int n_row; int n_col; int n_row_2; int algo; }; template<typename T> class OlsTest: public ::testing::TestWithParam<OlsInputs<T> > { protected: void basicTest() { params = ::testing::TestWithParam<OlsInputs<T>>::GetParam(); int len = params.n_row * params.n_col; int len2 = params.n_row_2 * params.n_col; cublasHandle_t cublas_handle; CUBLAS_CHECK(cublasCreate(&cublas_handle)); cusolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle)); allocate(data, len); allocate(labels, params.n_row); allocate(coef, params.n_col); allocate(coef2, params.n_col); allocate(coef3, params.n_col); allocate(coef_ref, params.n_col); allocate(coef2_ref, params.n_col); allocate(coef3_ref, params.n_col); allocate(pred_data, len2); allocate(pred, params.n_row_2); allocate(pred_ref, params.n_row_2); allocate(pred2, params.n_row_2); allocate(pred2_ref, params.n_row_2); allocate(pred3, params.n_row_2); allocate(pred3_ref, params.n_row_2); std::vector<T> data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; data_h.resize(len); updateDevice(data, data_h.data(), len); std::vector<T> labels_h = {6.0, 8.0, 9.0, 11.0}; labels_h.resize(params.n_row); updateDevice(labels, labels_h.data(), params.n_row); std::vector<T> coef_ref_h = {2.090908, 2.5454557}; coef_ref_h.resize(params.n_col); updateDevice(coef_ref, coef_ref_h.data(), params.n_col); std::vector<T> coef2_ref_h = {1.000001 , 1.9999998}; coef2_ref_h.resize(params.n_col); updateDevice(coef2_ref, coef2_ref_h.data(), params.n_col); std::vector<T> coef3_ref_h = {0.99999 , 2.00000}; coef3_ref_h.resize(params.n_col); updateDevice(coef3_ref, coef3_ref_h.data(), params.n_col); std::vector<T> pred_data_h = {3.0, 2.0, 5.0, 5.0}; pred_data_h.resize(len2); updateDevice(pred_data, pred_data_h.data(), len2); std::vector<T> pred_ref_h = {19.0, 16.9090}; pred_ref_h.resize(params.n_row_2); updateDevice(pred_ref, pred_ref_h.data(), params.n_row_2); std::vector<T> pred2_ref_h = {16.0, 15.0}; pred2_ref_h.resize(params.n_row_2); updateDevice(pred2_ref, pred2_ref_h.data(), params.n_row_2); std::vector<T> pred3_ref_h = {16.0, 15.0}; pred3_ref_h.resize(params.n_row_2); updateDevice(pred3_ref, pred3_ref_h.data(), params.n_row_2); intercept = T(0); olsFit(data, params.n_row, params.n_col, labels, coef, &intercept, false, false, cublas_handle, cusolver_handle, params.algo); olsPredict(pred_data, params.n_row_2, params.n_col, coef, intercept, pred, cublas_handle); updateDevice(data, data_h.data(), len); updateDevice(labels, labels_h.data(), params.n_row); intercept2 = T(0); olsFit(data, params.n_row, params.n_col, labels, coef2, &intercept2, true, false, cublas_handle, cusolver_handle, params.algo); olsPredict(pred_data, params.n_row_2, params.n_col, 
coef2, intercept2, pred2, cublas_handle); updateDevice(data, data_h.data(), len); updateDevice(labels, labels_h.data(), params.n_row); intercept3 = T(0); olsFit(data, params.n_row, params.n_col, labels, coef3, &intercept3, true, true, cublas_handle, cusolver_handle, params.algo); olsPredict(pred_data, params.n_row_2, params.n_col, coef3, intercept3, pred3, cublas_handle); CUBLAS_CHECK(cublasDestroy(cublas_handle)); CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle)); } void basicTest2() { params = ::testing::TestWithParam<OlsInputs<T>>::GetParam(); int len = params.n_row * params.n_col; cublasHandle_t cublas_handle; CUBLAS_CHECK(cublasCreate(&cublas_handle)); cusolverDnHandle_t cusolver_handle = NULL; CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle)); allocate(data_sc, len); allocate(labels_sc, len); allocate(coef_sc, 1); allocate(coef_sc_ref, 1); std::vector<T> data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; data_h.resize(len); updateDevice(data_sc, data_h.data(), len); std::vector<T> labels_h = {6.0, 8.0, 9.0, 11.0, -1.0, 2.0, -3.6, 3.3}; labels_h.resize(len); updateDevice(labels_sc, labels_h.data(), len); std::vector<T> coef_sc_ref_h = {-0.29285714}; coef_sc_ref_h.resize(1); updateDevice(coef_sc_ref, coef_sc_ref_h.data(), 1); T intercept_sc = T(0); olsFit(data_sc, len, 1, labels_sc, coef_sc, &intercept_sc, true, false, cublas_handle, cusolver_handle, params.algo); CUBLAS_CHECK(cublasDestroy(cublas_handle)); CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle)); } void SetUp() override { basicTest(); basicTest2(); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(coef)); CUDA_CHECK(cudaFree(coef_ref)); CUDA_CHECK(cudaFree(coef2)); CUDA_CHECK(cudaFree(coef2_ref)); CUDA_CHECK(cudaFree(coef3)); CUDA_CHECK(cudaFree(coef3_ref)); CUDA_CHECK(cudaFree(pred_data)); CUDA_CHECK(cudaFree(pred)); CUDA_CHECK(cudaFree(pred_ref)); CUDA_CHECK(cudaFree(pred2)); CUDA_CHECK(cudaFree(pred2_ref)); CUDA_CHECK(cudaFree(pred3)); CUDA_CHECK(cudaFree(pred3_ref)); CUDA_CHECK(cudaFree(data_sc)); CUDA_CHECK(cudaFree(labels_sc)); CUDA_CHECK(cudaFree(coef_sc)); CUDA_CHECK(cudaFree(coef_sc_ref)); } protected: OlsInputs<T> params; T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref; T *coef2, *coef2_ref, *pred2, *pred2_ref; T *coef3, *coef3_ref, *pred3, *pred3_ref; T *data_sc, *labels_sc, *coef_sc, *coef_sc_ref; T intercept, intercept2, intercept3; }; const std::vector<OlsInputs<float> > inputsf2 = { { 0.001f, 4, 2, 2, 0 }, { 0.001f, 4, 2, 2, 1 }, { 0.001f, 4, 2, 2, 2 } }; const std::vector<OlsInputs<double> > inputsd2 = { { 0.001, 4, 2, 2, 0 }, { 0.001, 4, 2, 2, 1 }, { 0.001, 4, 2, 2, 2 } }; typedef OlsTest<float> OlsTestF; TEST_P(OlsTestF, Fit) { ASSERT_TRUE( devArrMatch(coef_ref, coef, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(coef3_ref, coef3, params.n_col, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(pred_ref, pred, params.n_row_2, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(pred2_ref, pred2, params.n_row_2, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(pred3_ref, pred3, params.n_row_2, CompareApproxAbs<float>(params.tol))); ASSERT_TRUE( devArrMatch(coef_sc_ref, coef_sc, 1, CompareApproxAbs<float>(params.tol))); } typedef OlsTest<double> OlsTestD; TEST_P(OlsTestD, Fit) { ASSERT_TRUE( devArrMatch(coef_ref, coef, params.n_col, 
CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(coef2_ref, coef2, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(coef3_ref, coef3, params.n_col, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(pred_ref, pred, params.n_row_2, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(pred2_ref, pred2, params.n_row_2, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(pred3_ref, pred3, params.n_row_2, CompareApproxAbs<double>(params.tol))); ASSERT_TRUE( devArrMatch(coef_sc_ref, coef_sc, 1, CompareApproxAbs<double>(params.tol))); } INSTANTIATE_TEST_CASE_P(OlsTests, OlsTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(OlsTests, OlsTestD, ::testing::ValuesIn(inputsd2)); } } // end namespace ML
8ca0add7c0712be6c58e8f8f462e131f93b39f33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define _USE_MATH_DEFINES #include <math.h> #include <fftw3.h> //CBLAS extern "C" { #include <cblas.h> } #include <iostream> #include <stdlib.h> #include <stdio.h> //CuBlas #include <rocblas.h> #include "omp.h" // custom headers #include "./include/tools.h" /*__global__ void julia_kernel( int n, double *a, double *b, double *c ){*/ /*int i = threadIdx.y;*/ /*int j = threadIdx.x;*/ /*int gi = threadIdx.y + blockDim.y*blockIdx.y;*/ /*int gj = threadIdx.x + blockDim.x*blockIdx.x;*/ /*}*/ int main( int argc, char** argv ){ int args_needed = 1; if (argc < args_needed + 1 ){ std::cout <<"Arg number error, needed: " << args_needed<< std::endl; return 0; } std::cout << "cuFFT Test - Discrete Cosine Transform" << std::endl; // CUDA Timmers hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // OMP int ncpu = 1; omp_set_num_threads(ncpu); // Creating matrices - using two vectors std::cout << "PI number: "<< M_PI << std::endl; int size_m = 16; int size_n = 16; double *x_n = new double[size_m * size_n]; double *m_line = new double[size_m]; double *n_line = new double[size_n]; // Fill and Print fill_vector_cos(3, size_m, m_line); fill_vector_cos(2, size_n, n_line); print_array(m_line, size_m); print_array(n_line, size_n); // CPU - CBLAS cblas_dgemm(CblasRowMajor, // Layout CblasNoTrans, // trans a CblasNoTrans, // trans b 16, // m 16, // n 1, // k 1.0, // alpha m_line, // a matrix 1, // lda n_line, // b matrix 16, // ldb 0.0, // beta x_n, // c matrix 16 // ldc ); print_array(x_n, 16*16); // CUDA // Data double *m_line_d; double *n_line_d; double *x_n_d; // CUDA Malloc hipMalloc((void **)&m_line_d, sizeof(double)*size_m); hipMalloc((void **)&n_line_d, sizeof(double)*size_n); hipMalloc((void **)&x_n_d, sizeof(double)*size_m*size_n); // CUDA Handle hipblasHandle_t cublasHandle; hipEvent_t startcublas; hipEvent_t stopcublas; // cublas event create hipEventCreate(&startcublas); hipEventCreate(&stopcublas); hipblasCreate(&cublasHandle); // Tensor cores enabled /*cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); */ hipEventRecord(startcublas); const double alpha = 1.0; const double beta = 0.0; const double *alpha_ptr = &alpha; const double *beta_ptr = &beta; hipblasDgemm( cublasHandle, // hanlde HIPBLAS_OP_T, // trans a HIPBLAS_OP_T, // trans b 16, // m 16, // n 1, // k alpha_ptr, // alpha m_line_d, // a matrix 1, // lda n_line_d, // b matrix 16, // ldb beta_ptr, // beta x_n_d, // c matrix 16 // ldc ); hipEventRecord(stopcublas); float cublasTime; hipEventSynchronize(stopcublas); hipEventElapsedTime(&cublasTime, startcublas, stopcublas); std::cout << "cublas took: " << cublasTime << std::endl; // Free data delete x_n; delete m_line; delete n_line; // cuda free hipEventDestroy(startcublas); hipEventDestroy(stopcublas); hipFree(m_line_d); hipFree(n_line_d); hipFree(x_n_d); return 0; }
8ca0add7c0712be6c58e8f8f462e131f93b39f33.cu
#define _USE_MATH_DEFINES
#include <math.h>
#include <fftw3.h>

//CBLAS
extern "C" {
#include <cblas.h>
}

#include <iostream>
#include <stdlib.h>
#include <stdio.h>

//CuBlas
#include <cublas_v2.h>

#include "omp.h"

// custom headers
#include "./include/tools.h"

/*__global__ void julia_kernel( int n, double *a, double *b, double *c ){*/
/*int i = threadIdx.y;*/
/*int j = threadIdx.x;*/
/*int gi = threadIdx.y + blockDim.y*blockIdx.y;*/
/*int gj = threadIdx.x + blockDim.x*blockIdx.x;*/
/*}*/

int main( int argc, char** argv ){

    int args_needed = 1;
    if (argc < args_needed + 1 ){
        std::cout <<"Arg number error, needed: " << args_needed<< std::endl;
        return 0;
    }

    std::cout << "cuFFT Test - Discrete Cosine Transform" << std::endl;

    // CUDA Timers
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // OMP
    int ncpu = 1;
    omp_set_num_threads(ncpu);

    // Creating matrices - using two vectors
    std::cout << "PI number: "<< M_PI << std::endl;

    int size_m = 16;
    int size_n = 16;

    double *x_n = new double[size_m * size_n];
    double *m_line = new double[size_m];
    double *n_line = new double[size_n];

    // Fill and Print
    fill_vector_cos(3, size_m, m_line);
    fill_vector_cos(2, size_n, n_line);
    print_array(m_line, size_m);
    print_array(n_line, size_n);

    // CPU - CBLAS
    cblas_dgemm(CblasRowMajor, // Layout
                CblasNoTrans,  // trans a
                CblasNoTrans,  // trans b
                16,            // m
                16,            // n
                1,             // k
                1.0,           // alpha
                m_line,        // a matrix
                1,             // lda
                n_line,        // b matrix
                16,            // ldb
                0.0,           // beta
                x_n,           // c matrix
                16             // ldc
    );

    print_array(x_n, 16*16);

    // CUDA
    // Data
    double *m_line_d;
    double *n_line_d;
    double *x_n_d;

    // CUDA Malloc
    cudaMalloc((void **)&m_line_d, sizeof(double)*size_m);
    cudaMalloc((void **)&n_line_d, sizeof(double)*size_n);
    cudaMalloc((void **)&x_n_d, sizeof(double)*size_m*size_n);

    // CUDA Handle
    cublasHandle_t cublasHandle;
    cudaEvent_t startcublas;
    cudaEvent_t stopcublas;

    // cublas event create
    cudaEventCreate(&startcublas);
    cudaEventCreate(&stopcublas);
    cublasCreate(&cublasHandle);

    // Tensor cores enabled
    /*cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); */

    cudaEventRecord(startcublas);

    const double alpha = 1.0;
    const double beta = 0.0;
    const double *alpha_ptr = &alpha;
    const double *beta_ptr = &beta;

    cublasDgemm( cublasHandle, // handle
                 CUBLAS_OP_T,  // trans a
                 CUBLAS_OP_T,  // trans b
                 16,           // m
                 16,           // n
                 1,            // k
                 alpha_ptr,    // alpha
                 m_line_d,     // a matrix
                 1,            // lda
                 n_line_d,     // b matrix
                 16,           // ldb
                 beta_ptr,     // beta
                 x_n_d,        // c matrix
                 16            // ldc
    );

    cudaEventRecord(stopcublas);

    float cublasTime;
    cudaEventSynchronize(stopcublas);
    cudaEventElapsedTime(&cublasTime, startcublas, stopcublas);
    std::cout << "cublas took: " << cublasTime << std::endl;

    // Free data
    delete[] x_n;
    delete[] m_line;
    delete[] n_line;

    // cuda free
    cudaEventDestroy(startcublas);
    cudaEventDestroy(stopcublas);
    cudaFree(m_line_d);
    cudaFree(n_line_d);
    cudaFree(x_n_d);

    return 0;
}
b3c1b7fde82fff2be60bab1987e298553d869b28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> __global__ void vAdd(int* a, int* b, int* c, int n){ int i = threadIdx.x; if(i<n) c[i] = a[i] + b[i]; } void wrapper(int* a, int* b, int* c, int n){ int *d_a,*d_b,*d_c; hipMalloc(&d_a,n*sizeof(int)); hipMalloc(&d_b,n*sizeof(int)); hipMalloc(&d_c,n*sizeof(int)); hipMemcpy(d_a,a,n * sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_b,b,n * sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_c,c,n * sizeof(int),hipMemcpyHostToDevice); clock_t start = clock(); hipLaunchKernelGGL(( vAdd), dim3(1),dim3(n) , 0, 0, d_a,d_b,d_c,n); clock_t end = clock(); printf("Took %f Seconds", float(end-start)/CLOCKS_PER_SEC); //hipMemcpy(a,d_a,n*sizeof(int),hipMemcpyDeviceToHost); //hipMemcpy(b,d_b,n*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(c,d_c,n*sizeof(int),hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); return; }
b3c1b7fde82fff2be60bab1987e298553d869b28.cu
#include <stdio.h>
#include <time.h>

__global__ void vAdd(int* a, int* b, int* c, int n){
    int i = threadIdx.x;
    if(i<n)
        c[i] = a[i] + b[i];
}

void wrapper(int* a, int* b, int* c, int n){
    int *d_a,*d_b,*d_c;

    cudaMalloc(&d_a,n*sizeof(int));
    cudaMalloc(&d_b,n*sizeof(int));
    cudaMalloc(&d_c,n*sizeof(int));

    cudaMemcpy(d_a,a,n * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,n * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_c,c,n * sizeof(int),cudaMemcpyHostToDevice);

    clock_t start = clock();
    vAdd<<< 1,n >>>(d_a,d_b,d_c,n);
    clock_t end = clock();
    printf("Took %f Seconds", float(end-start)/CLOCKS_PER_SEC);

    //cudaMemcpy(a,d_a,n*sizeof(int),cudaMemcpyDeviceToHost);
    //cudaMemcpy(b,d_b,n*sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(c,d_c,n*sizeof(int),cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return;
}
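// A minimal sketch of the launch-syntax mapping that the hipify tooling applies to the pair
// above (assuming the standard hipify-perl/hipify-clang rules; runtime calls are renamed
// one-to-one, e.g. cudaMalloc -> hipMalloc, cudaMemcpyHostToDevice -> hipMemcpyHostToDevice):
//
//   CUDA:  vAdd<<< 1, n >>>(d_a, d_b, d_c, n);
//   HIP:   hipLaunchKernelGGL(vAdd, dim3(1), dim3(n),
//                             0 /*dynamic shared mem bytes*/, 0 /*stream*/,
//                             d_a, d_b, d_c, n);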
d4b1f34d93baa6cb286f713e4b56387b64a92d66.hip
// !!! This is a file automatically generated by hipify!!! // /usr/local/cuda/bin/nvcc testSpeedUp.cu -o test -O3 // WARNING for OPTIMIZATION // warning: compiling with nvcc -O3 filename.cu will pass the -O3 option to host code only. // nvcc -Xptxas -O3,-v filename.cu // https://stackoverflow.com/questions/43706755/how-can-i-get-the-nvcc-cuda-compiler-to-optimize-more #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #define N (1024*1024) #define M (10000) #define THREADS_PER_BLOCK 1024 void serial_add(double *a, double *b, double *c, int n, int m) { for(int index=0;index<n;index++) { for(int j=0;j<m;j++) { c[index] = a[index]*a[index] + b[index]*b[index]; } } } __global__ void vector_add(double *a, double *b, double *c) { int index = blockIdx.x * blockDim.x + threadIdx.x; for(int j=0;j<M;j++) { c[index] = a[index]*a[index] + b[index]*b[index]; } } int main() { clock_t start,end; double *a, *b, *c; int size = N * sizeof( double ); a = (double *)malloc( size ); b = (double *)malloc( size ); c = (double *)malloc( size ); for( int i = 0; i < N; i++ ) { a[i] = b[i] = i; c[i] = 0; } start = clock(); serial_add(a, b, c, N, M); printf( "c[0] = %d\n",0,c[0] ); printf( "c[%d] = %d\n",N-1, c[N-1] ); end = clock(); float time1 = ((float)(end-start))/CLOCKS_PER_SEC; printf("Serial: %f seconds\n",time1); start = clock(); double *d_a, *d_b, *d_c; hipMalloc( (void **) &d_a, size ); hipMalloc( (void **) &d_b, size ); hipMalloc( (void **) &d_c, size ); hipMemcpy( d_a, a, size, hipMemcpyHostToDevice ); hipMemcpy( d_b, b, size, hipMemcpyHostToDevice ); hipLaunchKernelGGL(( vector_add), dim3((N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, d_b, d_c ); hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost ); printf( "c[0] = %d\n",0,c[0] ); printf( "c[%d] = %d\n",N-1, c[N-1] ); free(a); free(b); free(c); hipFree( d_a ); hipFree( d_b ); hipFree( d_c ); end = clock(); float time2 = ((float)(end-start))/CLOCKS_PER_SEC; printf("CUDA: %f seconds, Speedup: %f\n",time2, time1/time2); return 0; }
d4b1f34d93baa6cb286f713e4b56387b64a92d66.cu
// /usr/local/cuda/bin/nvcc testSpeedUp.cu -o test -O3

// WARNING for OPTIMIZATION
// warning: compiling with nvcc -O3 filename.cu will pass the -O3 option to host code only.
// nvcc -Xptxas -O3,-v filename.cu
// https://stackoverflow.com/questions/43706755/how-can-i-get-the-nvcc-cuda-compiler-to-optimize-more

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>

#define N (1024*1024)
#define M (10000)
#define THREADS_PER_BLOCK 1024

void serial_add(double *a, double *b, double *c, int n, int m)
{
    for(int index=0;index<n;index++)
    {
        for(int j=0;j<m;j++)
        {
            c[index] = a[index]*a[index] + b[index]*b[index];
        }
    }
}

__global__ void vector_add(double *a, double *b, double *c)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    for(int j=0;j<M;j++)
    {
        c[index] = a[index]*a[index] + b[index]*b[index];
    }
}

int main()
{
    clock_t start,end;

    double *a, *b, *c;
    int size = N * sizeof( double );

    a = (double *)malloc( size );
    b = (double *)malloc( size );
    c = (double *)malloc( size );

    for( int i = 0; i < N; i++ )
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }

    start = clock();
    serial_add(a, b, c, N, M);

    printf( "c[%d] = %f\n", 0, c[0] );
    printf( "c[%d] = %f\n", N-1, c[N-1] );

    end = clock();

    float time1 = ((float)(end-start))/CLOCKS_PER_SEC;
    printf("Serial: %f seconds\n",time1);

    start = clock();

    double *d_a, *d_b, *d_c;
    cudaMalloc( (void **) &d_a, size );
    cudaMalloc( (void **) &d_b, size );
    cudaMalloc( (void **) &d_c, size );

    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );

    vector_add<<< (N + (THREADS_PER_BLOCK-1)) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, d_b, d_c );

    cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );

    printf( "c[%d] = %f\n", 0, c[0] );
    printf( "c[%d] = %f\n", N-1, c[N-1] );

    free(a);
    free(b);
    free(c);

    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );

    end = clock();

    float time2 = ((float)(end-start))/CLOCKS_PER_SEC;
    printf("CUDA: %f seconds, Speedup: %f\n",time2, time1/time2);

    return 0;
}
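// The CUDA path above is timed with host-side clock() around allocation, copies, launch and
// copy-back; the launch itself is asynchronous and only the trailing cudaMemcpy synchronises.
// A minimal sketch of timing just the kernel with CUDA events (the ev_* names are
// illustrative, not part of the original program):
//
//   cudaEvent_t ev_start, ev_stop;
//   cudaEventCreate(&ev_start);
//   cudaEventCreate(&ev_stop);
//   cudaEventRecord(ev_start);
//   vector_add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
//   cudaEventRecord(ev_stop);
//   cudaEventSynchronize(ev_stop);          // wait until the kernel has finished
//   float kernel_ms = 0.0f;
//   cudaEventElapsedTime(&kernel_ms, ev_start, ev_stop);
//   printf("kernel: %f ms\n", kernel_ms);
//   cudaEventDestroy(ev_start);
//   cudaEventDestroy(ev_stop);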
3b763a2e3aee9396ef438a33cd9528bc8fa3110e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <math.h> #include <matrix.h> #include <mex.h> #include "include/utils.cuh" #include "include/settings.h" #include "cutil.h" __global__ void convolve(float *a, float *b, float *c, int N, int K0, int K, int n, int m, int nz, int gridOffset) { int i = blockIdx.y, j = blockIdx.x, ni = gridOffset + threadIdx.z, k0 = threadIdx.x, k = threadIdx.y, ii, jj; float res = 0; if (ni >= N) return; for (ii = max(0, i - n + 1); ii < min(m, i + 1); ii++) for (jj = max(0, j - n + 1); jj < min(m, j + 1); jj++) res += a[ni + N * k + N * K * ((i - ii) * n + j - jj)] * b[k0 + K0 * k + K0 * K * (ii * m + jj)]; c[ni + N * k0 + N * K0 * k + N * K0 * K * ((i) * nz + j)] = res; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { const mxArray *a, *b; mxArray *c; const mwSize *dimsa, *dimsb; mwSize *dimsc; double *aa, *bb, *cc; float *fa, *fb, *fc, *da, *db, *dc; int n, m, i, nz, ndima, ndimb, K, K0, N; hipStream_t stream[2]; dim3 threads, blocks; int grids, nPerGrid; a = prhs[0]; b = prhs[1]; ndima = mxGetNumberOfDimensions(a); ndimb = mxGetNumberOfDimensions(b); dimsa = mxGetDimensions(a); dimsb = mxGetDimensions(b); N = dimsa[0]; K0 = dimsb[0]; K = dimsb[1]; if (ndima <= 2) n = 1; else n = dimsa[2]; if (ndimb <= 2) m = 1; else m = dimsb[2]; nz = n + m - 1; dimsc = (mwSize*)mxMalloc(sizeof(mwSize)*5); dimsc[0] = N; dimsc[1] = K0; dimsc[2] = K; dimsc[3] = nz; dimsc[4] = nz; c = plhs[0] = mxCreateNumericArray(5, dimsc, mxDOUBLE_CLASS, mxREAL); mxFree(dimsc); aa = mxGetPr(a); bb = mxGetPr(b); cc = mxGetPr(c); hipSetDevice(DEVICE); hipSetDeviceFlags(hipDeviceMapHost); hipHostMalloc(&fa, sizeof(float) * N * K * n * n); hipHostMalloc(&fb, sizeof(float) * K0 * K * m * m); hipHostMalloc(&fc, sizeof(float) * N * K0 * K * nz * nz); for (i = 0; i < N * K * n * n; i++) fa[i] = (float)aa[i]; for (i = 0; i < K0 * K * m * m; i++) fb[i] = (float)bb[i]; for (i = 0; i < N * K0 * K * nz * nz; i++) fc[i] = i; nPerGrid = min(BLOCKSIZE / K0 / K, MAXBLOCKD3); grids = (N - 1) / nPerGrid + 1; blocks = dim3(nz, nz, 1); hipMalloc(&db, sizeof(float) * K0 * K * m * m); hipMemcpy(db, fb, sizeof(float) * K0 * K * m * m, hipMemcpyHostToDevice); if (grids > 1) { hipStreamCreate(&stream[0]); hipStreamCreate(&stream[1]); threads = dim3(K0, K, nPerGrid); hipMalloc(&da, sizeof(float) * nPerGrid * K * n * n * 2); hipMalloc(&dc, sizeof(float) * nPerGrid * K0 * K * nz * nz * 2); hipMemcpy(da, fa, sizeof(float) * nPerGrid * K * n * n, hipMemcpyHostToDevice); for (i = 0; i < grids; i++) { int stm_cur = i % 2, stm_next = 1 - i % 2; hipLaunchKernelGGL(( convolve), dim3(blocks), dim3(threads), 0, stream[stm_cur], da, db, dc, N - (i - stm_cur) * nPerGrid, K0, K, n, m, nz, stm_cur * nPerGrid); if (i < grids - 1) hipMemcpyAsync(da + stm_next * nPerGrid * K * n * n, fa + (i + 1) * nPerGrid * K * n * n, sizeof(float) * nPerGrid * K * n * n, hipMemcpyHostToDevice, stream[stm_next]); hipMemcpyAsync(fc + i * nPerGrid * K0 * K * nz * nz, dc + stm_cur * nPerGrid * K0 * K * nz * nz, sizeof(float) * nPerGrid * K0 * K * nz * nz, hipMemcpyDeviceToHost, stream[stm_cur]); hipDeviceSynchronize(); } } else { threads = dim3(K0, K, N); hipMalloc(&da, sizeof(float) * N * K * n * n); hipMalloc(&dc, sizeof(float) * N * K0 * K * nz * nz); hipMemcpy(da, fa, sizeof(float) * N * K * n * n, hipMemcpyHostToDevice); hipLaunchKernelGGL(( convolve), dim3(blocks), dim3(threads), 0, 0, da, db, dc, N, K0, K, n, m, nz, 0); hipMemcpy(fc, dc, sizeof(float) * N * K0 * K * 
nz * nz, hipMemcpyDeviceToHost); } for (i = 0; i < N * K0 * K * nz * nz; i++) cc[i] = (double)fc[i]; hipHostFree(fa); hipHostFree(fb); hipHostFree(fc); hipFree(da); hipFree(db); hipFree(dc); }
3b763a2e3aee9396ef438a33cd9528bc8fa3110e.cu
//#include <math.h> #include <matrix.h> #include <mex.h> #include "include/utils.cuh" #include "include/settings.h" #include "cutil.h" __global__ void convolve(float *a, float *b, float *c, int N, int K0, int K, int n, int m, int nz, int gridOffset) { int i = blockIdx.y, j = blockIdx.x, ni = gridOffset + threadIdx.z, k0 = threadIdx.x, k = threadIdx.y, ii, jj; float res = 0; if (ni >= N) return; for (ii = max(0, i - n + 1); ii < min(m, i + 1); ii++) for (jj = max(0, j - n + 1); jj < min(m, j + 1); jj++) res += a[ni + N * k + N * K * ((i - ii) * n + j - jj)] * b[k0 + K0 * k + K0 * K * (ii * m + jj)]; c[ni + N * k0 + N * K0 * k + N * K0 * K * ((i) * nz + j)] = res; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { const mxArray *a, *b; mxArray *c; const mwSize *dimsa, *dimsb; mwSize *dimsc; double *aa, *bb, *cc; float *fa, *fb, *fc, *da, *db, *dc; int n, m, i, nz, ndima, ndimb, K, K0, N; cudaStream_t stream[2]; dim3 threads, blocks; int grids, nPerGrid; a = prhs[0]; b = prhs[1]; ndima = mxGetNumberOfDimensions(a); ndimb = mxGetNumberOfDimensions(b); dimsa = mxGetDimensions(a); dimsb = mxGetDimensions(b); N = dimsa[0]; K0 = dimsb[0]; K = dimsb[1]; if (ndima <= 2) n = 1; else n = dimsa[2]; if (ndimb <= 2) m = 1; else m = dimsb[2]; nz = n + m - 1; dimsc = (mwSize*)mxMalloc(sizeof(mwSize)*5); dimsc[0] = N; dimsc[1] = K0; dimsc[2] = K; dimsc[3] = nz; dimsc[4] = nz; c = plhs[0] = mxCreateNumericArray(5, dimsc, mxDOUBLE_CLASS, mxREAL); mxFree(dimsc); aa = mxGetPr(a); bb = mxGetPr(b); cc = mxGetPr(c); cudaSetDevice(DEVICE); cudaSetDeviceFlags(cudaDeviceMapHost); cudaMallocHost(&fa, sizeof(float) * N * K * n * n); cudaMallocHost(&fb, sizeof(float) * K0 * K * m * m); cudaMallocHost(&fc, sizeof(float) * N * K0 * K * nz * nz); for (i = 0; i < N * K * n * n; i++) fa[i] = (float)aa[i]; for (i = 0; i < K0 * K * m * m; i++) fb[i] = (float)bb[i]; for (i = 0; i < N * K0 * K * nz * nz; i++) fc[i] = i; nPerGrid = min(BLOCKSIZE / K0 / K, MAXBLOCKD3); grids = (N - 1) / nPerGrid + 1; blocks = dim3(nz, nz, 1); cudaMalloc(&db, sizeof(float) * K0 * K * m * m); cudaMemcpy(db, fb, sizeof(float) * K0 * K * m * m, cudaMemcpyHostToDevice); if (grids > 1) { cudaStreamCreate(&stream[0]); cudaStreamCreate(&stream[1]); threads = dim3(K0, K, nPerGrid); cudaMalloc(&da, sizeof(float) * nPerGrid * K * n * n * 2); cudaMalloc(&dc, sizeof(float) * nPerGrid * K0 * K * nz * nz * 2); cudaMemcpy(da, fa, sizeof(float) * nPerGrid * K * n * n, cudaMemcpyHostToDevice); for (i = 0; i < grids; i++) { int stm_cur = i % 2, stm_next = 1 - i % 2; convolve<<<blocks, threads, 0, stream[stm_cur]>>>(da, db, dc, N - (i - stm_cur) * nPerGrid, K0, K, n, m, nz, stm_cur * nPerGrid); if (i < grids - 1) cudaMemcpyAsync(da + stm_next * nPerGrid * K * n * n, fa + (i + 1) * nPerGrid * K * n * n, sizeof(float) * nPerGrid * K * n * n, cudaMemcpyHostToDevice, stream[stm_next]); cudaMemcpyAsync(fc + i * nPerGrid * K0 * K * nz * nz, dc + stm_cur * nPerGrid * K0 * K * nz * nz, sizeof(float) * nPerGrid * K0 * K * nz * nz, cudaMemcpyDeviceToHost, stream[stm_cur]); cudaDeviceSynchronize(); } } else { threads = dim3(K0, K, N); cudaMalloc(&da, sizeof(float) * N * K * n * n); cudaMalloc(&dc, sizeof(float) * N * K0 * K * nz * nz); cudaMemcpy(da, fa, sizeof(float) * N * K * n * n, cudaMemcpyHostToDevice); convolve<<<blocks, threads>>>(da, db, dc, N, K0, K, n, m, nz, 0); cudaMemcpy(fc, dc, sizeof(float) * N * K0 * K * nz * nz, cudaMemcpyDeviceToHost); } for (i = 0; i < N * K0 * K * nz * nz; i++) cc[i] = (double)fc[i]; cudaFreeHost(fa); 
cudaFreeHost(fb); cudaFreeHost(fc); cudaFree(da); cudaFree(db); cudaFree(dc); }
fa8d05d40f5fe4c4ee544e1b36d13bd3d370a78b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define BLOCK_SIZE 512 #include <stdio.h> __global__ void force (float *virialArray, float *potentialArray, float *pval, float *vval, float *rx, float *ry, float *rz, float *fx, float *fy, float *fz, float sigma, float rcut, float vrcut, float dvrc12, float dvrcut, int *head, int *list, int mx, int my, int mz, int natoms, int step, float sfx, float sfy, float sfz) { float sigsq, rcutsq; float rxi, ryi, rzi, fxi, fyi, fzi; float rxij, ryij, rzij, rijsq; float rij, sr2, sr6, vij, wij, fij, fxij, fyij, fzij; float potential, virial; int i, icell, j, jcell, nabor; int xi, yi, zi, ix, jx, kx, xcell, ycell, zcell; __shared__ float vArray[BLOCK_SIZE]; __shared__ float pArray[BLOCK_SIZE]; int p_start = BLOCK_SIZE; sigsq = sigma*sigma; rcutsq = rcut*rcut; potential = 0.0f; virial = 0.0f; int element = blockIdx.x * blockDim.x + threadIdx.x; if (element < natoms) { rxi = rx[element]; ryi = ry[element]; rzi = rz[element]; fxi = 0.0f; fyi = 0.0f; fzi = 0.0f; xi = (int)((rxi+0.5f)/sfx) + 1; yi = (int)((ryi+0.5f)/sfy) + 1; zi = (int)((rzi+0.5f)/sfz) + 1; if(xi > mx) xi = mx; if(yi > my) yi = my; if(zi > mz) zi = mz; icell = xi + (mx+2)*(yi+zi*(my+2)); for (ix=-1;ix<=1;ix++) for (jx=-1;jx<=1;jx++) for (kx=-1;kx<=1;kx++){ xcell = ix+xi; ycell = jx+yi; zcell = kx+zi; jcell = xcell + (mx+2)*(ycell+(my+2)*zcell); j = head[jcell]; while (j>=0) { if (j!=element) { rxij = rxi - rx[j]; ryij = ryi - ry[j]; rzij = rzi - rz[j]; rijsq = rxij*rxij + ryij*ryij + rzij*rzij; if (rijsq < rcutsq) { //START FORCE_IJ rij = (float) sqrt ((float)rijsq); sr2 = sigsq/rijsq; sr6 = sr2*sr2*sr2; vij = sr6*(sr6-1.0f) - vrcut - dvrc12*(rij-rcut); wij = sr6*(sr6-0.5f) + dvrcut*rij; fij = wij/rijsq; fxij = fij*rxij; fyij = fij*ryij; fzij = fij*rzij; //END FORCE_IJ wij *= 0.5f; vij *= 0.5f; potential += vij; virial += wij; fxi += fxij; fyi += fyij; fzi += fzij; } } j = list[j]; } } *(fx+element) = 48.0f*fxi; *(fy+element) = 48.0f*fyi; *(fz+element) = 48.0f*fzi; vArray[threadIdx.x] = virial; pArray[threadIdx.x] = potential; unsigned int stride; unsigned int t = threadIdx.x; __syncthreads(); if (t == 0) { // __syncthreads(); for(stride = 1; stride < blockDim.x; stride += 1) { vArray[t]+= vArray[stride]; pArray[t]+= pArray[stride]; } } //__syncthreads(); if(t == 0) { virialArray[blockIdx.x] = vArray[0]; potentialArray[blockIdx.x] = pArray[0]; } } }
fa8d05d40f5fe4c4ee544e1b36d13bd3d370a78b.cu
#define BLOCK_SIZE 512
#include <stdio.h>

__global__ void force (float *virialArray, float *potentialArray, float *pval, float *vval,
                       float *rx, float *ry, float *rz, float *fx, float *fy, float *fz,
                       float sigma, float rcut, float vrcut, float dvrc12, float dvrcut,
                       int *head, int *list, int mx, int my, int mz, int natoms, int step,
                       float sfx, float sfy, float sfz)
{
    float sigsq, rcutsq;
    float rxi, ryi, rzi, fxi, fyi, fzi;
    float rxij, ryij, rzij, rijsq;
    float rij, sr2, sr6, vij, wij, fij, fxij, fyij, fzij;
    float potential, virial;
    int i, icell, j, jcell, nabor;
    int xi, yi, zi, ix, jx, kx, xcell, ycell, zcell;

    __shared__ float vArray[BLOCK_SIZE];
    __shared__ float pArray[BLOCK_SIZE];

    int p_start = BLOCK_SIZE;

    sigsq = sigma*sigma;
    rcutsq = rcut*rcut;

    potential = 0.0f;
    virial = 0.0f;

    int element = blockIdx.x * blockDim.x + threadIdx.x;
    if (element < natoms) {

        rxi = rx[element];
        ryi = ry[element];
        rzi = rz[element];
        fxi = 0.0f;
        fyi = 0.0f;
        fzi = 0.0f;

        xi = (int)((rxi+0.5f)/sfx) + 1;
        yi = (int)((ryi+0.5f)/sfy) + 1;
        zi = (int)((rzi+0.5f)/sfz) + 1;
        if(xi > mx) xi = mx;
        if(yi > my) yi = my;
        if(zi > mz) zi = mz;

        icell = xi + (mx+2)*(yi+zi*(my+2));

        for (ix=-1;ix<=1;ix++)
            for (jx=-1;jx<=1;jx++)
                for (kx=-1;kx<=1;kx++){
                    xcell = ix+xi;
                    ycell = jx+yi;
                    zcell = kx+zi;
                    jcell = xcell + (mx+2)*(ycell+(my+2)*zcell);
                    j = head[jcell];
                    while (j>=0) {
                        if (j!=element) {
                            rxij = rxi - rx[j];
                            ryij = ryi - ry[j];
                            rzij = rzi - rz[j];
                            rijsq = rxij*rxij + ryij*ryij + rzij*rzij;
                            if (rijsq < rcutsq) {
                                //START FORCE_IJ
                                rij = (float) sqrt ((float)rijsq);
                                sr2 = sigsq/rijsq;
                                sr6 = sr2*sr2*sr2;
                                vij = sr6*(sr6-1.0f) - vrcut - dvrc12*(rij-rcut);
                                wij = sr6*(sr6-0.5f) + dvrcut*rij;
                                fij = wij/rijsq;
                                fxij = fij*rxij;
                                fyij = fij*ryij;
                                fzij = fij*rzij;
                                //END FORCE_IJ
                                wij *= 0.5f;
                                vij *= 0.5f;
                                potential += vij;
                                virial += wij;
                                fxi += fxij;
                                fyi += fyij;
                                fzi += fzij;
                            }
                        }
                        j = list[j];
                    }
                }

        *(fx+element) = 48.0f*fxi;
        *(fy+element) = 48.0f*fyi;
        *(fz+element) = 48.0f*fzi;

        vArray[threadIdx.x] = virial;
        pArray[threadIdx.x] = potential;

        unsigned int stride;
        unsigned int t = threadIdx.x;
        __syncthreads();

        if (t == 0) {
            // __syncthreads();
            for(stride = 1; stride < blockDim.x; stride += 1) {
                vArray[t]+= vArray[stride];
                pArray[t]+= pArray[stride];
            }
        }
        //__syncthreads();
        if(t == 0) {
            virialArray[blockIdx.x] = vArray[0];
            potentialArray[blockIdx.x] = pArray[0];
        }
    }
}
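// The block-level sum at the end of the force kernel above is done by thread 0 walking
// vArray/pArray serially. A tree-shaped shared-memory reduction is the usual alternative;
// a minimal sketch, assuming blockDim.x is a power of two (as with BLOCK_SIZE 512) and that
// vArray/pArray have been initialised for every thread of the block, including those with
// element >= natoms, so all threads can reach the barriers:
//
//   __syncthreads();
//   for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
//       if (threadIdx.x < s) {
//           vArray[threadIdx.x] += vArray[threadIdx.x + s];
//           pArray[threadIdx.x] += pArray[threadIdx.x + s];
//       }
//       __syncthreads();   // every thread must hit this, so it cannot sit inside the natoms guard
//   }
//   if (threadIdx.x == 0) {
//       virialArray[blockIdx.x]    = vArray[0];
//       potentialArray[blockIdx.x] = pArray[0];
//   }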
9c8afa56e8a9fa2617b5ddbfe57c68a859d7155a.hip
// !!! This is a file automatically generated by hipify!!! #include "Entity.hpp" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "Vector2.hpp" #include "Grid.hpp" #include "Entity.hpp" #include "Player.hpp" #include "Item.hpp" #include "Map.hpp" #include "Model.hpp" #include "MapEditor.hpp" using namespace std; unsigned int entities_size_in_bf = 0; unsigned int entities_position = 0; vector<struct entity> entities; __forceinline__ __device__ float getInterpixel(const unsigned char* frame, const unsigned int width, const unsigned int height, const unsigned int channels, float x, float y, const int c) { int x_i = (int)x; int y_i = (int)y; x -= x_i; y -= y_i; unsigned char value_components[4]; value_components[0] = frame[y_i * (width * channels) + x_i * channels + c]; if (x > 0) { if (x_i + 1 < width) { value_components[1] = frame[y_i * (width * channels) + (x_i + 1) * channels + c]; } else { x = 0.0f; } } if (y > 0) { if (y_i + 1 < height) { value_components[2] = frame[(y_i + 1) * (width * channels) + x_i * channels + c]; if (x > 0) { value_components[3] = frame[(y_i + 1) * (width * channels) + (x_i + 1) * channels + c]; } } else { y = 0.0f; } } float m_0 = 4.0f / 16.0f; float m_1 = 4.0f / 16.0f; float m_2 = 4.0f / 16.0f; float m_3 = 4.0f / 16.0f; float tmp, tmp2; if (x <= 0.5f) { tmp = ((0.5f - x) / 0.5f) * m_1; m_0 += tmp; m_1 -= tmp; m_2 += tmp; m_3 -= tmp; } else { tmp = ((x - 0.5f) / 0.5f) * m_0; m_0 -= tmp; m_1 += tmp; m_2 -= tmp; m_3 += tmp; } if (y <= 0.5f) { tmp = ((0.5f - y) / 0.5f) * m_2; tmp2 = ((0.5f - y) / 0.5f) * m_3; m_0 += tmp; m_1 += tmp2; m_2 -= tmp; m_3 -= tmp2; } else { tmp = ((y - 0.5f) / 0.5f) * m_0; tmp2 = ((y - 0.5f) / 0.5f) * m_1; m_0 -= tmp; m_1 -= tmp2; m_2 += tmp; m_3 += tmp2; } float value = m_0 * value_components[0] + m_1 * value_components[1] + m_2 * value_components[2] + m_3 * value_components[3]; return value; } __global__ void draw_entities_kernel( const unsigned int* device_data_assets, const unsigned int* device_data_map, const unsigned int players_models_position, const unsigned int item_models_position, const unsigned int map_models_position, const unsigned int font_position, const unsigned int* device_data_rw, const unsigned int entities_position, const unsigned int selected_player_eid, const unsigned int gd_position_in_bf, const unsigned int gd_data_position_in_bf, const struct vector2<unsigned int> map_dimensions_center, const unsigned int map_pathables, const unsigned char draw_pathing, const struct vector2<unsigned int> pathing_brushsize, unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels, const unsigned int camera_x1, const unsigned int camera_y1, const float camera_z, const struct vector2<unsigned int> mouse_position, const unsigned int tick_counter) { int i = blockIdx.x * blockDim.x + threadIdx.x; //unsigned int players_count = device_data_players[players_position-1] / (unsigned int)ceilf(sizeof(struct player) / (float)sizeof(unsigned int)); //struct player* players = (struct player*) &device_data_players[players_position]; struct entity* entities = (struct entity*) &device_data_rw[entities_position]; struct model* player_models = (struct model*) &device_data_assets[players_models_position]; struct model* item_models = (struct model*) &device_data_assets[item_models_position]; struct model* map_models = (struct model*) &device_data_map[map_models_position]; if (i < output_width * output_height * 
output_channels) { int current_channel = i / (output_width * output_height); int current_idx = i % (output_width * output_height); int current_x = (current_idx % output_width); int current_y = (current_idx / output_width); float current_game_x = camera_x1 + current_x*camera_z; float current_game_y = camera_y1 + current_y*camera_z; float current_mouse_game_x = camera_x1 + mouse_position[0] * camera_z; float current_mouse_game_y = camera_y1 + mouse_position[1] * camera_z; int sampling_filter_dim = ceilf(camera_z); unsigned char* output = (unsigned char*)&device_data_output[output_position]; /*if ((int)(current_game_x) % 32 == 0 || (int)(current_game_y) % 32 == 0) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 255; }*/ int grid_current_idx = grid_get_index(device_data_rw, gd_position_in_bf, struct vector3<float> (current_game_x, current_game_y, 0.0f)); if (grid_current_idx != -1) { unsigned int entities_iddata_position = device_data_rw[gd_data_position_in_bf + 1 + grid_current_idx]; if (entities_iddata_position > 0) { unsigned int entities_count = device_data_rw[entities_iddata_position]; for (int e = 0; e < entities_count; e++) { unsigned int entity_id = device_data_rw[entities_iddata_position + 1 + e]; if (entity_id < UINT_MAX) { /* if ((int)(current_game_x) % 32 == 0 || (int)(current_game_y) % 32 == 0) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e + 1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; } */ /* output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e+1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; */ struct model* m = nullptr; const unsigned int* shadows_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; shadows_positions = &device_data_assets[m->shadow_positions]; } else if (entities[entity_id].et == ET_ITEM) { m = &item_models[entities[entity_id].model_id - 50]; shadows_positions = &device_data_assets[m->shadow_positions]; } else if (entities[entity_id].et == ET_STATIC_ASSET) { m = &map_models[entities[entity_id].model_id - 100]; shadows_positions = &device_data_map[m->shadow_positions]; } if (m != nullptr) { int upscale = 1; float upscale_fac = 1.0f; unsigned int shadow_position = shadows_positions[upscale - 1]; while (camera_z / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac) < 2 && upscale - 1 < m->shadow_zoom_level_count - 1) { upscale++; shadow_position = shadows_positions[upscale - 1]; upscale_fac *= 2.0f; } sampling_filter_dim = ceilf(camera_z / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac)); float offset_to_model_shadow_base_x = (current_game_x - (entities[entity_id].position[0] + 
(m->shadow_offset[0] * entities[entity_id].scale))) / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac); float offset_to_model_shadow_base_y = (current_game_y - (entities[entity_id].position[1] + (m->shadow_offset[1] * entities[entity_id].scale))) / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac); if (offset_to_model_shadow_base_x >= 1 && offset_to_model_shadow_base_x < m->shadow_dimensions[0] * upscale_fac - 1 && offset_to_model_shadow_base_y >= 1 && offset_to_model_shadow_base_y < m->shadow_dimensions[1] * upscale_fac - 1) { /* output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e + 1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; */ int animation_tick = (((tick_counter + entities[entity_id].model_animation_offset) / m->shadow_animation_stepsize) % m->shadow_animation_ticks);; if (m->shadow_animation_type == 1) { if (((tick_counter + entities[entity_id].model_animation_offset) / m->shadow_animation_stepsize) / m->shadow_animation_ticks % 2 == 1) { animation_tick = m->shadow_animation_ticks - 1 - animation_tick; } } unsigned int* p_shadow_positions; unsigned char* p_shadow; if (entities[entity_id].et == ET_STATIC_ASSET) { p_shadow_positions = (unsigned int*)&device_data_map[shadow_position]; p_shadow = (unsigned char*)&device_data_map[p_shadow_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->shadow_rotations)) % m->shadow_rotations) * m->shadow_animation_ticks + animation_tick]]; } else { p_shadow_positions = (unsigned int*)&device_data_assets[shadow_position]; p_shadow = (unsigned char*)&device_data_assets[p_shadow_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->shadow_rotations)) % m->shadow_rotations) * m->shadow_animation_ticks + animation_tick]]; } //enum player_stance p_stance = players[p].player_stance; //enum player_action p_action = players[p].player_action; float interpixel_min = 255.0f; float interpixel_alpha_max = 0.0f; for (int s_y = 0; s_y < sampling_filter_dim; s_y++) { for (int s_x = 0; s_x < sampling_filter_dim; s_x++) { if (offset_to_model_shadow_base_x + s_x >= 1 && offset_to_model_shadow_base_x + s_x < m->shadow_dimensions[0] * upscale_fac - 1 && offset_to_model_shadow_base_y + s_y >= 1 && offset_to_model_shadow_base_y + s_y < m->shadow_dimensions[1] * upscale_fac - 1 ) { float model_palette_idx_x = offset_to_model_shadow_base_x + s_x; float model_palette_idx_y = offset_to_model_shadow_base_y + s_y; float interpixel_alpha = getInterpixel(p_shadow, m->shadow_dimensions[0] * upscale_fac, m->shadow_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, 3); if (interpixel_alpha > 25) { if (interpixel_alpha_max < interpixel_alpha) { interpixel_alpha_max = interpixel_alpha; } float interpixel = getInterpixel(p_shadow, m->shadow_dimensions[0] * upscale_fac, m->shadow_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, current_channel); if (interpixel < interpixel_min) { interpixel_min = interpixel; } //output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - interpixel_alpha) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha / 255.0f) * 
interpixel)); } } } } if (interpixel_alpha_max > 25) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - interpixel_alpha_max) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha_max / 255.0f) * interpixel_min)); } } } } } unsigned int player_z_max = 0; float player_y_max = -1.0f; int player_id_max = -1; bool has_text = false; for (int e = 0; e < entities_count; e++) { unsigned int entity_id = device_data_rw[entities_iddata_position + 1 + e]; if (entity_id < UINT_MAX) { struct model* m = nullptr; const unsigned int* model_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_ITEM) { m = &item_models[entities[entity_id].model_id - 50]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_STATIC_ASSET) { m = &map_models[entities[entity_id].model_id - 100]; model_positions = &device_data_map[m->model_positions]; } if (m != nullptr) { int upscale = 1; float upscale_fac = 1.0f; unsigned int model_position = model_positions[upscale - 1]; while (camera_z / ((m->model_scale * entities[entity_id].scale)/ upscale_fac) < 2 && upscale - 1 < m->model_zoom_level_count - 1) { upscale++; model_position = model_positions[upscale - 1]; upscale_fac *= 2.0f; } sampling_filter_dim = ceilf(camera_z / ((m->model_scale * entities[entity_id].scale) / upscale_fac)); float offset_to_model_base_x = (current_game_x - (entities[entity_id].position[0])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); float offset_to_model_base_y = (current_game_y - (entities[entity_id].position[1])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); if (offset_to_model_base_x >= 1 && offset_to_model_base_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y >= 1 && offset_to_model_base_y < m->model_dimensions[1] * upscale_fac - 1) { /* output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e + 1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; */ int animation_tick = (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) % m->model_animation_ticks); if (m->model_animation_type == 1) { if (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) / m->model_animation_ticks % 2 == 1) { animation_tick = m->model_animation_ticks - 1 - animation_tick; } } unsigned int* p_model_positions; unsigned char* p_model; if (entities[entity_id].et == ET_STATIC_ASSET) { p_model_positions = (unsigned int*)&device_data_map[model_position]; p_model = (unsigned char*)&device_data_map[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } else { p_model_positions = (unsigned int*)&device_data_assets[model_position]; p_model = (unsigned char*)&device_data_assets[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + 
animation_tick]]; } if (m->mt == MT_LOOTABLE_ITEM) { if (offset_to_model_base_x <= 22 * camera_z || offset_to_model_base_y <= 22 * camera_z || offset_to_model_base_x >= m->model_dimensions[0] * upscale_fac - (22 * camera_z) || offset_to_model_base_y >= m->model_dimensions[1] * upscale_fac - (22 * camera_z)) { float alpha_item = 150; if (m->id == 50) { output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 255)); } else if (m->id == 51) { output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 255)); } else if (m->id == 52) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 255)); output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 146)); } } } for (int s_y = 0; s_y < sampling_filter_dim; s_y++) { for (int s_x = 0; s_x < sampling_filter_dim; s_x++) { if (offset_to_model_base_x + s_x >= 1 && offset_to_model_base_x + s_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y + s_y >= 1 && offset_to_model_base_y + s_y < m->model_dimensions[1] * upscale_fac - 1 ) { float model_palette_idx_x = offset_to_model_base_x + s_x; float model_palette_idx_y = offset_to_model_base_y + s_y; float interpixel_alpha = getInterpixel(p_model, m->model_dimensions[0] * upscale_fac, m->model_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, 3); if (interpixel_alpha >= 64) { if ((entities[entity_id].position[1] + (m->model_dimensions[1] * m->model_scale * entities[entity_id].scale) > player_y_max && entities[entity_id].model_z == player_z_max) || entities[entity_id].model_z > player_z_max) { player_z_max = entities[entity_id].model_z; player_y_max = entities[entity_id].position[1] + (m->model_dimensions[1] * m->model_scale * entities[entity_id].scale); player_id_max = entity_id; s_y = sampling_filter_dim; s_x = sampling_filter_dim; } } } } } } } } } if (player_id_max >= 0) { unsigned int entity_id = player_id_max; //for (int e = 0; e < entities_count; e++) { //unsigned int entity_id = device_data_players[entities_iddata_position + 1 + e]; //if (entity_id < UINT_MAX) { struct model* m = nullptr; const unsigned int* model_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_ITEM) { m = &item_models[entities[entity_id].model_id - 50]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_STATIC_ASSET) { m = &map_models[entities[entity_id].model_id - 100]; model_positions = &device_data_map[m->model_positions]; } if (m != nullptr) { int upscale = 1; float upscale_fac = 1.0f; unsigned int model_position = model_positions[upscale - 1]; while (camera_z / 
((m->model_scale * entities[entity_id].scale) / upscale_fac) < 2 && upscale - 1 < m->model_zoom_level_count - 1) { upscale++; model_position = model_positions[upscale - 1]; upscale_fac *= 2.0f; } sampling_filter_dim = ceilf(camera_z / ((m->model_scale * entities[entity_id].scale) / upscale_fac)); float offset_to_model_base_x = (current_game_x - (entities[entity_id].position[0])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); float offset_to_model_base_y = (current_game_y - (entities[entity_id].position[1])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); if (offset_to_model_base_x >= 1 && offset_to_model_base_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y >= 1 && offset_to_model_base_y < m->model_dimensions[1] * upscale_fac - 1) { int animation_tick = (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) % m->model_animation_ticks); if (m->model_animation_type == 1) { if (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) / m->model_animation_ticks % 2 == 1) { animation_tick = m->model_animation_ticks - 1 - animation_tick; } } unsigned int* p_model_positions; unsigned char* p_model; if (entities[entity_id].et == ET_STATIC_ASSET) { p_model_positions = (unsigned int*)&device_data_map[model_position]; p_model = (unsigned char*)&device_data_map[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } else { p_model_positions = (unsigned int*)&device_data_assets[model_position]; p_model = (unsigned char*)&device_data_assets[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } for (int s_y = 0; s_y < sampling_filter_dim; s_y++) { for (int s_x = 0; s_x < sampling_filter_dim; s_x++) { if (offset_to_model_base_x + s_x >= 1 && offset_to_model_base_x + s_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y + s_y >= 1 && offset_to_model_base_y + s_y < m->model_dimensions[1] * upscale_fac - 1 ) { float model_palette_idx_x = offset_to_model_base_x + s_x; float model_palette_idx_y = offset_to_model_base_y + s_y; float interpixel_alpha = getInterpixel(p_model, m->model_dimensions[0] * upscale_fac, m->model_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, 3); if (interpixel_alpha > 0) { if (m->mt == MT_PLAYER && selected_player_eid == entity_id && interpixel_alpha < 255) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = 255;// (unsigned char)(((255 - interpixel_alpha) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha / 255.0f) * 200)); } float interpixel = getInterpixel(p_model, m->model_dimensions[0] * upscale_fac, m->model_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, current_channel); output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - interpixel_alpha) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha / 255.0f) * interpixel)); } } } } } //} } } for (int e = 0; e < entities_count; e++) { unsigned int entity_id = device_data_rw[entities_iddata_position + 1 + e]; if (entity_id < UINT_MAX) { struct 
model* m = nullptr; const unsigned int* model_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; model_positions = &device_data_assets[m->model_positions]; //inventory int inventory_max_id = -1; int* params = (int*)&entities[entity_id].params; int params_pos = 1; for (int ip = 0; ip < 6; ip++) { if (params[params_pos++] < UINT_MAX) inventory_max_id = ip; params_pos++; } float offset_to_model_base_x = (current_game_x - (entities[entity_id].position[0])) / ((m->model_scale * entities[entity_id].scale)); float offset_to_model_base_y = (current_game_y - (entities[entity_id].position[1])) / ((m->model_scale * entities[entity_id].scale)); if (offset_to_model_base_y < 32 * (inventory_max_id + 1) && offset_to_model_base_y >= 0.0f && offset_to_model_base_x + 32.0f >= -3 - 18 && offset_to_model_base_x + 32.0f < 20) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = 200; if (offset_to_model_base_x + 32.0f + 19 >= 0 && offset_to_model_base_x + 32.0f + 19 < 32) { //inventory "text" int letter_idx = (int)(offset_to_model_base_y) / 32; int letter_code = 0; if (params[1 + letter_idx * 2] < UINT_MAX) { if (params[1 + letter_idx * 2] == 50) { letter_code = 1; } else if (params[1 + letter_idx * 2] == 51) { letter_code = 2; } else if (params[1 + letter_idx * 2] == 52) { letter_code = 3; } if (letter_code >= 0 && letter_code <= 127 && device_data_assets[font_position + letter_code] > 0) { unsigned char* letter = (unsigned char*)&device_data_assets[device_data_assets[font_position + letter_code]]; int letter_x = (int)(offset_to_model_base_x + 32.0f + 19) % 32; int letter_y = (int)offset_to_model_base_y % 32; //shooting /* if (params[1 + letter_idx * 2] == 50 && params[1 + letter_idx * 2 + 1] % 15 != 0) { if (letter_x >= 28 && letter_x <= 32 && letter_y >= 7 && letter_y <= 15) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } } */ float letter_alpha = letter[letter_y * (32 * 4) + letter_x * 4 + 3]; if (letter_alpha > 25) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - letter_alpha) / 255.0f * 255 + (letter_alpha / 255.0f) * letter[letter_y * (32 * 4) + letter_x * 4 + current_channel])); } } } } } //top text bg if (offset_to_model_base_y < 3 && offset_to_model_base_y >= -35.0f && offset_to_model_base_x + 32.0f >= -3 - 18 && offset_to_model_base_x + 32.0f < entities[entity_id].name_len * 32 + 3) { int bg_alpha = 150; output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = 200; } //hp bar if (offset_to_model_base_y < 2 && offset_to_model_base_y >= -33.0f && offset_to_model_base_x + 32.0f >= -19 && offset_to_model_base_x + 32.0f < -11) { float hp_percent = entities[entity_id].params[0] / (float)100.0f; float hp_scale_y = 31.0f; if (hp_percent * hp_scale_y - 33.0f >= offset_to_model_base_y) { if (hp_percent > 0.66f) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } else if (hp_percent > 0.33f) { output[current_y * (output_width * 
output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 157; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } else { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } } } //shield bar if (offset_to_model_base_y < 2 && offset_to_model_base_y >= -33.0f && offset_to_model_base_x + 32.0f >= -11 && offset_to_model_base_x + 32.0f < -3) { float shield_percent = entities[entity_id].params[1] / (float)100.0f; float shield_scale_y = 31.0f; if (shield_percent * shield_scale_y - 33.0f >= offset_to_model_base_y) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 25; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255; } } //top text if (offset_to_model_base_y < 0 && offset_to_model_base_y >= -32.0f && offset_to_model_base_x + 32.0f >= 0 && offset_to_model_base_x + 32.0f < entities[entity_id].name_len * 32) { int letter_idx = (int)(offset_to_model_base_x + 32.0f) / 32; int letter_code = (int)entities[entity_id].name[letter_idx]; if (letter_code >= 0 && letter_code <= 127 && device_data_assets[font_position + (int)entities[entity_id].name[letter_idx]] > 0) { unsigned char* letter = (unsigned char*)&device_data_assets[device_data_assets[font_position + (int)entities[entity_id].name[letter_idx]]]; int letter_y = (int)offset_to_model_base_y + 32; int letter_x = ((int)offset_to_model_base_x + 32) % 32; float letter_alpha = letter[letter_y * (32 * 4) + letter_x * 4 + 3]; if (letter_alpha > 25) { //printf("%i ", (int)entities[entity_id].name[letter_idx]); output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - letter_alpha) / 255.0f * 255 + (letter_alpha / 255.0f) * letter[letter_y * (32 * 4) + letter_x * 4 + current_channel])); } } } } } } } } if (draw_pathing) { if (current_channel == 1 && current_y - mouse_position[1] >= 0 && current_y - mouse_position[1] < pathing_brushsize[1]/camera_z && current_x - mouse_position[0] >= 0 && current_x - mouse_position[0] < pathing_brushsize[0]/camera_z) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] / 3 + 170; } unsigned char* frame_pathable = (unsigned char*)&device_data_map[map_pathables]; if (frame_pathable[(int)floorf(current_game_y) * map_dimensions_center[0] + (int)floorf(current_game_x)] == 0) { if (current_channel == 0) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] / 2 + 127; } else { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] / 2; } } } } } void launch_draw_entities_kernel( const unsigned int* device_data_assets, const unsigned int *device_data_map, const unsigned int 
players_models_position, const unsigned int item_models_position, const unsigned int map_models_position, const unsigned int font_position, const unsigned int* device_data_rw, const unsigned int entities_position, const unsigned int gd_position_in_bf, const unsigned int gd_data_position_in_bf, unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels, const unsigned int camera_x1, const unsigned int camera_y1, const float camera_z, const struct vector2<unsigned int> mouse_position, const unsigned int tick_counter) { hipError_t err = hipSuccess; int threadsPerBlock = 256; int blocksPerGrid = (output_width * output_height * 3 + threadsPerBlock - 1) / threadsPerBlock; #ifdef PATHING_DEBUG draw_entities_kernel << <blocksPerGrid, threadsPerBlock >> > (device_data_assets, device_data_map, players_models_position, item_models_position, map_models_position, font_position, device_data_rw, entities_position, player_selected_id, gd_position_in_bf, gd_data_position_in_bf, gm.map_dimensions, gm.map_pathable_position, 1, struct vector2<unsigned int>(0, 0), device_data_output, output_position, output_width, output_height, output_channels, camera_x1, camera_y1, camera_z, mouse_position, tick_counter); #else draw_entities_kernel << <blocksPerGrid, threadsPerBlock >> > (device_data_assets, device_data_map, players_models_position, item_models_position, map_models_position, font_position, device_data_rw, entities_position, player_selected_id, gd_position_in_bf, gd_data_position_in_bf, gm.map_dimensions, gm.map_pathable_position, mapeditor_action_type, mapeditor_pathing_brushsize, device_data_output, output_position, output_width, output_height, output_channels, camera_x1, camera_y1, camera_z, mouse_position, tick_counter); #endif err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed in draw_entities_kernel (error code %s)\n", hipGetErrorString(err)); } } void entity_add(string name, enum entity_type et, unsigned int model_id, unsigned int model_z) { struct entity e; e.et = et; for (int i = 0; i < name.length() && i < 50; i++) { e.name[i] = name[i]; e.name_len = i+1; } for (int i = name.length(); i < 50; i++) { e.name[i] = '\0'; } e.scale = 1.0f; e.orientation = (float)(rand() % 360); e.model_id = model_id; e.model_z = model_z; e.model_animation_offset = 0; for (int i = 0; i < 50; i++) { e.params[i] = 0; } entities.push_back(e); } void entities_upload(struct bit_field* bf) { unsigned int size = entities.size() * sizeof(struct entity); entities_size_in_bf = (unsigned int)ceilf(size / (float)sizeof(unsigned int)); entities_position = bit_field_add_bulk(bf, (unsigned int *) entities.data(), entities_size_in_bf, size)+1; }
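The per-channel write that draw_entities_kernel repeats above for shadows, models, font glyphs and the HUD bars is a standard source-over alpha blend, dst' = ((255 - a) / 255) * dst + (a / 255) * src. As a reading aid only, here is a self-contained sketch with a hypothetical blend_over helper; this helper does not exist in the file above, it just names the inline expression.

#include "hip/hip_runtime.h"
#include <cstdio>

// Sketch only -- not present in the source above. Factors out the alpha blend written
// inline throughout draw_entities_kernel:
//   dst' = ((255 - alpha) / 255) * dst + (alpha / 255) * src
__host__ __device__ inline unsigned char blend_over(unsigned char dst, float src, float alpha)
{
    return (unsigned char)(((255.0f - alpha) / 255.0f) * dst + (alpha / 255.0f) * src);
}

int main()
{
    // A fully opaque source replaces the destination; half alpha lands roughly in between.
    printf("%d %d\n", (int)blend_over(0, 255.0f, 255.0f), (int)blend_over(0, 255.0f, 127.5f));  // 255 127
    return 0;
}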
9c8afa56e8a9fa2617b5ddbfe57c68a859d7155a.cu
#include "Entity.hpp" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "Vector2.hpp" #include "Grid.hpp" #include "Entity.hpp" #include "Player.hpp" #include "Item.hpp" #include "Map.hpp" #include "Model.hpp" #include "MapEditor.hpp" using namespace std; unsigned int entities_size_in_bf = 0; unsigned int entities_position = 0; vector<struct entity> entities; __forceinline__ __device__ float getInterpixel(const unsigned char* frame, const unsigned int width, const unsigned int height, const unsigned int channels, float x, float y, const int c) { int x_i = (int)x; int y_i = (int)y; x -= x_i; y -= y_i; unsigned char value_components[4]; value_components[0] = frame[y_i * (width * channels) + x_i * channels + c]; if (x > 0) { if (x_i + 1 < width) { value_components[1] = frame[y_i * (width * channels) + (x_i + 1) * channels + c]; } else { x = 0.0f; } } if (y > 0) { if (y_i + 1 < height) { value_components[2] = frame[(y_i + 1) * (width * channels) + x_i * channels + c]; if (x > 0) { value_components[3] = frame[(y_i + 1) * (width * channels) + (x_i + 1) * channels + c]; } } else { y = 0.0f; } } float m_0 = 4.0f / 16.0f; float m_1 = 4.0f / 16.0f; float m_2 = 4.0f / 16.0f; float m_3 = 4.0f / 16.0f; float tmp, tmp2; if (x <= 0.5f) { tmp = ((0.5f - x) / 0.5f) * m_1; m_0 += tmp; m_1 -= tmp; m_2 += tmp; m_3 -= tmp; } else { tmp = ((x - 0.5f) / 0.5f) * m_0; m_0 -= tmp; m_1 += tmp; m_2 -= tmp; m_3 += tmp; } if (y <= 0.5f) { tmp = ((0.5f - y) / 0.5f) * m_2; tmp2 = ((0.5f - y) / 0.5f) * m_3; m_0 += tmp; m_1 += tmp2; m_2 -= tmp; m_3 -= tmp2; } else { tmp = ((y - 0.5f) / 0.5f) * m_0; tmp2 = ((y - 0.5f) / 0.5f) * m_1; m_0 -= tmp; m_1 -= tmp2; m_2 += tmp; m_3 += tmp2; } float value = m_0 * value_components[0] + m_1 * value_components[1] + m_2 * value_components[2] + m_3 * value_components[3]; return value; } __global__ void draw_entities_kernel( const unsigned int* device_data_assets, const unsigned int* device_data_map, const unsigned int players_models_position, const unsigned int item_models_position, const unsigned int map_models_position, const unsigned int font_position, const unsigned int* device_data_rw, const unsigned int entities_position, const unsigned int selected_player_eid, const unsigned int gd_position_in_bf, const unsigned int gd_data_position_in_bf, const struct vector2<unsigned int> map_dimensions_center, const unsigned int map_pathables, const unsigned char draw_pathing, const struct vector2<unsigned int> pathing_brushsize, unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels, const unsigned int camera_x1, const unsigned int camera_y1, const float camera_z, const struct vector2<unsigned int> mouse_position, const unsigned int tick_counter) { int i = blockIdx.x * blockDim.x + threadIdx.x; //unsigned int players_count = device_data_players[players_position-1] / (unsigned int)ceilf(sizeof(struct player) / (float)sizeof(unsigned int)); //struct player* players = (struct player*) &device_data_players[players_position]; struct entity* entities = (struct entity*) &device_data_rw[entities_position]; struct model* player_models = (struct model*) &device_data_assets[players_models_position]; struct model* item_models = (struct model*) &device_data_assets[item_models_position]; struct model* map_models = (struct model*) &device_data_map[map_models_position]; if (i < output_width * output_height * output_channels) { int current_channel = i / (output_width * 
output_height); int current_idx = i % (output_width * output_height); int current_x = (current_idx % output_width); int current_y = (current_idx / output_width); float current_game_x = camera_x1 + current_x*camera_z; float current_game_y = camera_y1 + current_y*camera_z; float current_mouse_game_x = camera_x1 + mouse_position[0] * camera_z; float current_mouse_game_y = camera_y1 + mouse_position[1] * camera_z; int sampling_filter_dim = ceilf(camera_z); unsigned char* output = (unsigned char*)&device_data_output[output_position]; /*if ((int)(current_game_x) % 32 == 0 || (int)(current_game_y) % 32 == 0) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 255; }*/ int grid_current_idx = grid_get_index(device_data_rw, gd_position_in_bf, struct vector3<float> (current_game_x, current_game_y, 0.0f)); if (grid_current_idx != -1) { unsigned int entities_iddata_position = device_data_rw[gd_data_position_in_bf + 1 + grid_current_idx]; if (entities_iddata_position > 0) { unsigned int entities_count = device_data_rw[entities_iddata_position]; for (int e = 0; e < entities_count; e++) { unsigned int entity_id = device_data_rw[entities_iddata_position + 1 + e]; if (entity_id < UINT_MAX) { /* if ((int)(current_game_x) % 32 == 0 || (int)(current_game_y) % 32 == 0) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e + 1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; } */ /* output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e+1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; */ struct model* m = nullptr; const unsigned int* shadows_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; shadows_positions = &device_data_assets[m->shadow_positions]; } else if (entities[entity_id].et == ET_ITEM) { m = &item_models[entities[entity_id].model_id - 50]; shadows_positions = &device_data_assets[m->shadow_positions]; } else if (entities[entity_id].et == ET_STATIC_ASSET) { m = &map_models[entities[entity_id].model_id - 100]; shadows_positions = &device_data_map[m->shadow_positions]; } if (m != nullptr) { int upscale = 1; float upscale_fac = 1.0f; unsigned int shadow_position = shadows_positions[upscale - 1]; while (camera_z / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac) < 2 && upscale - 1 < m->shadow_zoom_level_count - 1) { upscale++; shadow_position = shadows_positions[upscale - 1]; upscale_fac *= 2.0f; } sampling_filter_dim = ceilf(camera_z / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac)); float offset_to_model_shadow_base_x = (current_game_x - (entities[entity_id].position[0] + (m->shadow_offset[0] * entities[entity_id].scale))) / 
((m->shadow_scale * entities[entity_id].scale) / upscale_fac); float offset_to_model_shadow_base_y = (current_game_y - (entities[entity_id].position[1] + (m->shadow_offset[1] * entities[entity_id].scale))) / ((m->shadow_scale * entities[entity_id].scale) / upscale_fac); if (offset_to_model_shadow_base_x >= 1 && offset_to_model_shadow_base_x < m->shadow_dimensions[0] * upscale_fac - 1 && offset_to_model_shadow_base_y >= 1 && offset_to_model_shadow_base_y < m->shadow_dimensions[1] * upscale_fac - 1) { /* output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e + 1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; */ int animation_tick = (((tick_counter + entities[entity_id].model_animation_offset) / m->shadow_animation_stepsize) % m->shadow_animation_ticks);; if (m->shadow_animation_type == 1) { if (((tick_counter + entities[entity_id].model_animation_offset) / m->shadow_animation_stepsize) / m->shadow_animation_ticks % 2 == 1) { animation_tick = m->shadow_animation_ticks - 1 - animation_tick; } } unsigned int* p_shadow_positions; unsigned char* p_shadow; if (entities[entity_id].et == ET_STATIC_ASSET) { p_shadow_positions = (unsigned int*)&device_data_map[shadow_position]; p_shadow = (unsigned char*)&device_data_map[p_shadow_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->shadow_rotations)) % m->shadow_rotations) * m->shadow_animation_ticks + animation_tick]]; } else { p_shadow_positions = (unsigned int*)&device_data_assets[shadow_position]; p_shadow = (unsigned char*)&device_data_assets[p_shadow_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->shadow_rotations)) % m->shadow_rotations) * m->shadow_animation_ticks + animation_tick]]; } //enum player_stance p_stance = players[p].player_stance; //enum player_action p_action = players[p].player_action; float interpixel_min = 255.0f; float interpixel_alpha_max = 0.0f; for (int s_y = 0; s_y < sampling_filter_dim; s_y++) { for (int s_x = 0; s_x < sampling_filter_dim; s_x++) { if (offset_to_model_shadow_base_x + s_x >= 1 && offset_to_model_shadow_base_x + s_x < m->shadow_dimensions[0] * upscale_fac - 1 && offset_to_model_shadow_base_y + s_y >= 1 && offset_to_model_shadow_base_y + s_y < m->shadow_dimensions[1] * upscale_fac - 1 ) { float model_palette_idx_x = offset_to_model_shadow_base_x + s_x; float model_palette_idx_y = offset_to_model_shadow_base_y + s_y; float interpixel_alpha = getInterpixel(p_shadow, m->shadow_dimensions[0] * upscale_fac, m->shadow_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, 3); if (interpixel_alpha > 25) { if (interpixel_alpha_max < interpixel_alpha) { interpixel_alpha_max = interpixel_alpha; } float interpixel = getInterpixel(p_shadow, m->shadow_dimensions[0] * upscale_fac, m->shadow_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, current_channel); if (interpixel < interpixel_min) { interpixel_min = interpixel; } //output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - interpixel_alpha) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha / 255.0f) * interpixel)); } } } } if (interpixel_alpha_max > 25) { 
output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - interpixel_alpha_max) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha_max / 255.0f) * interpixel_min)); } } } } } unsigned int player_z_max = 0; float player_y_max = -1.0f; int player_id_max = -1; bool has_text = false; for (int e = 0; e < entities_count; e++) { unsigned int entity_id = device_data_rw[entities_iddata_position + 1 + e]; if (entity_id < UINT_MAX) { struct model* m = nullptr; const unsigned int* model_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_ITEM) { m = &item_models[entities[entity_id].model_id - 50]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_STATIC_ASSET) { m = &map_models[entities[entity_id].model_id - 100]; model_positions = &device_data_map[m->model_positions]; } if (m != nullptr) { int upscale = 1; float upscale_fac = 1.0f; unsigned int model_position = model_positions[upscale - 1]; while (camera_z / ((m->model_scale * entities[entity_id].scale)/ upscale_fac) < 2 && upscale - 1 < m->model_zoom_level_count - 1) { upscale++; model_position = model_positions[upscale - 1]; upscale_fac *= 2.0f; } sampling_filter_dim = ceilf(camera_z / ((m->model_scale * entities[entity_id].scale) / upscale_fac)); float offset_to_model_base_x = (current_game_x - (entities[entity_id].position[0])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); float offset_to_model_base_y = (current_game_y - (entities[entity_id].position[1])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); if (offset_to_model_base_x >= 1 && offset_to_model_base_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y >= 1 && offset_to_model_base_y < m->model_dimensions[1] * upscale_fac - 1) { /* output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255 * (e + 1 % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255 * (e % 2); output[current_y * (output_width * output_channels) + current_x * output_channels + 3] = 100; */ int animation_tick = (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) % m->model_animation_ticks); if (m->model_animation_type == 1) { if (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) / m->model_animation_ticks % 2 == 1) { animation_tick = m->model_animation_ticks - 1 - animation_tick; } } unsigned int* p_model_positions; unsigned char* p_model; if (entities[entity_id].et == ET_STATIC_ASSET) { p_model_positions = (unsigned int*)&device_data_map[model_position]; p_model = (unsigned char*)&device_data_map[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } else { p_model_positions = (unsigned int*)&device_data_assets[model_position]; p_model = (unsigned char*)&device_data_assets[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } if (m->mt == MT_LOOTABLE_ITEM) { if 
(offset_to_model_base_x <= 22 * camera_z || offset_to_model_base_y <= 22 * camera_z || offset_to_model_base_x >= m->model_dimensions[0] * upscale_fac - (22 * camera_z) || offset_to_model_base_y >= m->model_dimensions[1] * upscale_fac - (22 * camera_z)) { float alpha_item = 150; if (m->id == 50) { output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 255)); } else if (m->id == 51) { output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 255)); } else if (m->id == 52) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 255)); output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = (unsigned char)(((255 - alpha_item) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (alpha_item / 255.0f) * 146)); } } } for (int s_y = 0; s_y < sampling_filter_dim; s_y++) { for (int s_x = 0; s_x < sampling_filter_dim; s_x++) { if (offset_to_model_base_x + s_x >= 1 && offset_to_model_base_x + s_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y + s_y >= 1 && offset_to_model_base_y + s_y < m->model_dimensions[1] * upscale_fac - 1 ) { float model_palette_idx_x = offset_to_model_base_x + s_x; float model_palette_idx_y = offset_to_model_base_y + s_y; float interpixel_alpha = getInterpixel(p_model, m->model_dimensions[0] * upscale_fac, m->model_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, 3); if (interpixel_alpha >= 64) { if ((entities[entity_id].position[1] + (m->model_dimensions[1] * m->model_scale * entities[entity_id].scale) > player_y_max && entities[entity_id].model_z == player_z_max) || entities[entity_id].model_z > player_z_max) { player_z_max = entities[entity_id].model_z; player_y_max = entities[entity_id].position[1] + (m->model_dimensions[1] * m->model_scale * entities[entity_id].scale); player_id_max = entity_id; s_y = sampling_filter_dim; s_x = sampling_filter_dim; } } } } } } } } } if (player_id_max >= 0) { unsigned int entity_id = player_id_max; //for (int e = 0; e < entities_count; e++) { //unsigned int entity_id = device_data_players[entities_iddata_position + 1 + e]; //if (entity_id < UINT_MAX) { struct model* m = nullptr; const unsigned int* model_positions; if (entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_ITEM) { m = &item_models[entities[entity_id].model_id - 50]; model_positions = &device_data_assets[m->model_positions]; } else if (entities[entity_id].et == ET_STATIC_ASSET) { m = &map_models[entities[entity_id].model_id - 100]; model_positions = &device_data_map[m->model_positions]; } if (m != nullptr) { int upscale = 1; float upscale_fac = 1.0f; unsigned int model_position = model_positions[upscale - 1]; while (camera_z / ((m->model_scale * entities[entity_id].scale) / upscale_fac) < 2 && 
upscale - 1 < m->model_zoom_level_count - 1) { upscale++; model_position = model_positions[upscale - 1]; upscale_fac *= 2.0f; } sampling_filter_dim = ceilf(camera_z / ((m->model_scale * entities[entity_id].scale) / upscale_fac)); float offset_to_model_base_x = (current_game_x - (entities[entity_id].position[0])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); float offset_to_model_base_y = (current_game_y - (entities[entity_id].position[1])) / ((m->model_scale * entities[entity_id].scale) / upscale_fac); if (offset_to_model_base_x >= 1 && offset_to_model_base_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y >= 1 && offset_to_model_base_y < m->model_dimensions[1] * upscale_fac - 1) { int animation_tick = (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) % m->model_animation_ticks); if (m->model_animation_type == 1) { if (((tick_counter + entities[entity_id].model_animation_offset) / m->model_animation_stepsize) / m->model_animation_ticks % 2 == 1) { animation_tick = m->model_animation_ticks - 1 - animation_tick; } } unsigned int* p_model_positions; unsigned char* p_model; if (entities[entity_id].et == ET_STATIC_ASSET) { p_model_positions = (unsigned int*)&device_data_map[model_position]; p_model = (unsigned char*)&device_data_map[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } else { p_model_positions = (unsigned int*)&device_data_assets[model_position]; p_model = (unsigned char*)&device_data_assets[p_model_positions[((int)(entities[entity_id].orientation / 10 / (36 / m->model_rotations)) % m->model_rotations) * m->model_animation_ticks + animation_tick]]; } for (int s_y = 0; s_y < sampling_filter_dim; s_y++) { for (int s_x = 0; s_x < sampling_filter_dim; s_x++) { if (offset_to_model_base_x + s_x >= 1 && offset_to_model_base_x + s_x < m->model_dimensions[0] * upscale_fac - 1 && offset_to_model_base_y + s_y >= 1 && offset_to_model_base_y + s_y < m->model_dimensions[1] * upscale_fac - 1 ) { float model_palette_idx_x = offset_to_model_base_x + s_x; float model_palette_idx_y = offset_to_model_base_y + s_y; float interpixel_alpha = getInterpixel(p_model, m->model_dimensions[0] * upscale_fac, m->model_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, 3); if (interpixel_alpha > 0) { if (m->mt == MT_PLAYER && selected_player_eid == entity_id && interpixel_alpha < 255) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = 255;// (unsigned char)(((255 - interpixel_alpha) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha / 255.0f) * 200)); } float interpixel = getInterpixel(p_model, m->model_dimensions[0] * upscale_fac, m->model_dimensions[1] * upscale_fac, 4, model_palette_idx_x, model_palette_idx_y, current_channel); output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - interpixel_alpha) / 255.0f * output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] + (interpixel_alpha / 255.0f) * interpixel)); } } } } } //} } } for (int e = 0; e < entities_count; e++) { unsigned int entity_id = device_data_rw[entities_iddata_position + 1 + e]; if (entity_id < UINT_MAX) { struct model* m = nullptr; const unsigned int* model_positions; if 
(entities[entity_id].et == ET_PLAYER) { m = &player_models[entities[entity_id].model_id]; model_positions = &device_data_assets[m->model_positions]; //inventory int inventory_max_id = -1; int* params = (int*)&entities[entity_id].params; int params_pos = 1; for (int ip = 0; ip < 6; ip++) { if (params[params_pos++] < UINT_MAX) inventory_max_id = ip; params_pos++; } float offset_to_model_base_x = (current_game_x - (entities[entity_id].position[0])) / ((m->model_scale * entities[entity_id].scale)); float offset_to_model_base_y = (current_game_y - (entities[entity_id].position[1])) / ((m->model_scale * entities[entity_id].scale)); if (offset_to_model_base_y < 32 * (inventory_max_id + 1) && offset_to_model_base_y >= 0.0f && offset_to_model_base_x + 32.0f >= -3 - 18 && offset_to_model_base_x + 32.0f < 20) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = 200; if (offset_to_model_base_x + 32.0f + 19 >= 0 && offset_to_model_base_x + 32.0f + 19 < 32) { //inventory "text" int letter_idx = (int)(offset_to_model_base_y) / 32; int letter_code = 0; if (params[1 + letter_idx * 2] < UINT_MAX) { if (params[1 + letter_idx * 2] == 50) { letter_code = 1; } else if (params[1 + letter_idx * 2] == 51) { letter_code = 2; } else if (params[1 + letter_idx * 2] == 52) { letter_code = 3; } if (letter_code >= 0 && letter_code <= 127 && device_data_assets[font_position + letter_code] > 0) { unsigned char* letter = (unsigned char*)&device_data_assets[device_data_assets[font_position + letter_code]]; int letter_x = (int)(offset_to_model_base_x + 32.0f + 19) % 32; int letter_y = (int)offset_to_model_base_y % 32; //shooting /* if (params[1 + letter_idx * 2] == 50 && params[1 + letter_idx * 2 + 1] % 15 != 0) { if (letter_x >= 28 && letter_x <= 32 && letter_y >= 7 && letter_y <= 15) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } } */ float letter_alpha = letter[letter_y * (32 * 4) + letter_x * 4 + 3]; if (letter_alpha > 25) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - letter_alpha) / 255.0f * 255 + (letter_alpha / 255.0f) * letter[letter_y * (32 * 4) + letter_x * 4 + current_channel])); } } } } } //top text bg if (offset_to_model_base_y < 3 && offset_to_model_base_y >= -35.0f && offset_to_model_base_x + 32.0f >= -3 - 18 && offset_to_model_base_x + 32.0f < entities[entity_id].name_len * 32 + 3) { int bg_alpha = 150; output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = 200; } //hp bar if (offset_to_model_base_y < 2 && offset_to_model_base_y >= -33.0f && offset_to_model_base_x + 32.0f >= -19 && offset_to_model_base_x + 32.0f < -11) { float hp_percent = entities[entity_id].params[0] / (float)100.0f; float hp_scale_y = 31.0f; if (hp_percent * hp_scale_y - 33.0f >= offset_to_model_base_y) { if (hp_percent > 0.66f) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } else if (hp_percent > 0.33f) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; 
output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 157; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } else { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 0; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 0; } } } //shield bar if (offset_to_model_base_y < 2 && offset_to_model_base_y >= -33.0f && offset_to_model_base_x + 32.0f >= -11 && offset_to_model_base_x + 32.0f < -3) { float shield_percent = entities[entity_id].params[1] / (float)100.0f; float shield_scale_y = 31.0f; if (shield_percent * shield_scale_y - 33.0f >= offset_to_model_base_y) { output[current_y * (output_width * output_channels) + current_x * output_channels + 0] = 25; output[current_y * (output_width * output_channels) + current_x * output_channels + 1] = 255; output[current_y * (output_width * output_channels) + current_x * output_channels + 2] = 255; } } //top text if (offset_to_model_base_y < 0 && offset_to_model_base_y >= -32.0f && offset_to_model_base_x + 32.0f >= 0 && offset_to_model_base_x + 32.0f < entities[entity_id].name_len * 32) { int letter_idx = (int)(offset_to_model_base_x + 32.0f) / 32; int letter_code = (int)entities[entity_id].name[letter_idx]; if (letter_code >= 0 && letter_code <= 127 && device_data_assets[font_position + (int)entities[entity_id].name[letter_idx]] > 0) { unsigned char* letter = (unsigned char*)&device_data_assets[device_data_assets[font_position + (int)entities[entity_id].name[letter_idx]]]; int letter_y = (int)offset_to_model_base_y + 32; int letter_x = ((int)offset_to_model_base_x + 32) % 32; float letter_alpha = letter[letter_y * (32 * 4) + letter_x * 4 + 3]; if (letter_alpha > 25) { //printf("%i ", (int)entities[entity_id].name[letter_idx]); output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = (unsigned char)(((255 - letter_alpha) / 255.0f * 255 + (letter_alpha / 255.0f) * letter[letter_y * (32 * 4) + letter_x * 4 + current_channel])); } } } } } } } } if (draw_pathing) { if (current_channel == 1 && current_y - mouse_position[1] >= 0 && current_y - mouse_position[1] < pathing_brushsize[1]/camera_z && current_x - mouse_position[0] >= 0 && current_x - mouse_position[0] < pathing_brushsize[0]/camera_z) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] / 3 + 170; } unsigned char* frame_pathable = (unsigned char*)&device_data_map[map_pathables]; if (frame_pathable[(int)floorf(current_game_y) * map_dimensions_center[0] + (int)floorf(current_game_x)] == 0) { if (current_channel == 0) { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] / 2 + 127; } else { output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] = output[current_y * (output_width * output_channels) + current_x * output_channels + current_channel] / 2; } } } } } void launch_draw_entities_kernel( const unsigned int* device_data_assets, const unsigned int *device_data_map, const unsigned int players_models_position, const unsigned int item_models_position, const unsigned 
int map_models_position, const unsigned int font_position, const unsigned int* device_data_rw, const unsigned int entities_position, const unsigned int gd_position_in_bf, const unsigned int gd_data_position_in_bf, unsigned int* device_data_output, const unsigned int output_position, const unsigned int output_width, const unsigned int output_height, const unsigned int output_channels, const unsigned int camera_x1, const unsigned int camera_y1, const float camera_z, const struct vector2<unsigned int> mouse_position, const unsigned int tick_counter) { cudaError_t err = cudaSuccess; int threadsPerBlock = 256; int blocksPerGrid = (output_width * output_height * 3 + threadsPerBlock - 1) / threadsPerBlock; #ifdef PATHING_DEBUG draw_entities_kernel << <blocksPerGrid, threadsPerBlock >> > (device_data_assets, device_data_map, players_models_position, item_models_position, map_models_position, font_position, device_data_rw, entities_position, player_selected_id, gd_position_in_bf, gd_data_position_in_bf, gm.map_dimensions, gm.map_pathable_position, 1, struct vector2<unsigned int>(0, 0), device_data_output, output_position, output_width, output_height, output_channels, camera_x1, camera_y1, camera_z, mouse_position, tick_counter); #else draw_entities_kernel << <blocksPerGrid, threadsPerBlock >> > (device_data_assets, device_data_map, players_models_position, item_models_position, map_models_position, font_position, device_data_rw, entities_position, player_selected_id, gd_position_in_bf, gd_data_position_in_bf, gm.map_dimensions, gm.map_pathable_position, mapeditor_action_type, mapeditor_pathing_brushsize, device_data_output, output_position, output_width, output_height, output_channels, camera_x1, camera_y1, camera_z, mouse_position, tick_counter); #endif err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed in draw_entities_kernel (error code %s)\n", cudaGetErrorString(err)); } } void entity_add(string name, enum entity_type et, unsigned int model_id, unsigned int model_z) { struct entity e; e.et = et; for (int i = 0; i < name.length() && i < 50; i++) { e.name[i] = name[i]; e.name_len = i+1; } for (int i = name.length(); i < 50; i++) { e.name[i] = '\0'; } e.scale = 1.0f; e.orientation = (float)(rand() % 360); e.model_id = model_id; e.model_z = model_z; e.model_animation_offset = 0; for (int i = 0; i < 50; i++) { e.params[i] = 0; } entities.push_back(e); } void entities_upload(struct bit_field* bf) { unsigned int size = entities.size() * sizeof(struct entity); entities_size_in_bf = (unsigned int)ceilf(size / (float)sizeof(unsigned int)); entities_position = bit_field_add_bulk(bf, (unsigned int *) entities.data(), entities_size_in_bf, size)+1; }
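As far as the visible portions go, the .cu listing above and the .hip listing before it differ only in the runtime API spellings inside the launch wrapper (cudaError_t / cudaSuccess / cudaGetLastError / cudaGetErrorString versus the hip* equivalents); both keep the triple-chevron kernel launch, and the grid size is the usual ceil-divide of the pixel count by the block size. The following stand-alone sketch of that launch-and-check idiom uses a hypothetical kernel k on the HIP side; the CUDA side is identical apart from the names just listed.

#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void k(int arg) { (void)arg; }   // hypothetical stand-in for draw_entities_kernel

int main()
{
    const int work_items      = 1920 * 1080 * 3;   // e.g. output_width * output_height * channels
    const int threadsPerBlock = 256;
    const int blocksPerGrid   = (work_items + threadsPerBlock - 1) / threadsPerBlock;  // ceil-divide, as in the launcher above

    k<<<blocksPerGrid, threadsPerBlock>>>(0);   // hipcc accepts the triple-chevron launch kept in this pair;
                                                // hipify can also rewrite it as
                                                // hipLaunchKernelGGL(k, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, 0)
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "Failed in k (error code %s)\n", hipGetErrorString(err));
    }
    return 0;
}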
e45afb9b18b102c0f171973e8873f6aaba7d4e51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <chrono> #include <sstream> #include <iostream> #include <fstream> #include "tuningParameters.h" #include "qtclib.h" #include "OptionParser.h" #include "libdata.h" #include "cudacommon.h" #define _USE_MATH_DEFINES #include <float.h> #include "comm.h" using namespace std; #include "kernels_common.h" #include "kernels_compact_storage.h" // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. The user is allowed to specify // the size of the input data in megabytes if they are not using a // predefined size (i.e. the -s option). // // Arguments: // op: the options parser / parameter database // // Programmer: Anthony Danalis // Creation: February 04, 2011 // Returns: nothing // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op){ op.addOption("PointCount", OPT_INT, "4096", "point count (default: 4096)"); op.addOption("Threshold", OPT_FLOAT, "1", "cluster diameter threshold (default: 1)"); op.addOption("SaveOutput", OPT_BOOL, "", "Save output results in files (default: false)"); op.addOption("Verbose", OPT_BOOL, "", "Print cluster cardinalities (default: false)"); } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Calls single precision and, if viable, double precision QT-Clustering // benchmark. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, OptionParser& op); void RunBenchmark(OptionParser &op){ runTest("QTC", op); } // **************************************************************************** // Function: calculate_participants // // Purpose: // This function decides how many GPUs (up to the maximum requested by the user) // and threadblocks per GPU will be used. It also returns the total number of // thread-blocks across all GPUs and the number of thread-blocks that are in nodes // before the current one. // In the future, the behavior of this function should be decided based on // auto-tuning instead of arbitrary decisions. 
// // Arguments: // The number of nodes requested by the user and the four // variables that the function computes (passed by reference) // // // Returns: nothing // // Programmer: Anthony Danalis // Creation: May 25, 2011 // // **************************************************************************** void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){ int ac_nd_cnt, thr_blc_cnt, total_thr_blc_cnt; ac_nd_cnt = node_count; if( point_count <= (node_count-1) * SM_COUNT * GPU_MIN_SATURATION_FACTOR ){ int K = SM_COUNT * GPU_MIN_SATURATION_FACTOR; ac_nd_cnt = (point_count+K-1) / K; } if( point_count >= ac_nd_cnt * SM_COUNT * OVR_SBSCR_FACTOR ){ thr_blc_cnt = SM_COUNT * OVR_SBSCR_FACTOR; total_thr_blc_cnt = thr_blc_cnt * ac_nd_cnt; }else{ thr_blc_cnt = point_count/ac_nd_cnt; if( cwrank < point_count%ac_nd_cnt ){ thr_blc_cnt++; } total_thr_blc_cnt = point_count; } *active_node_count = ac_nd_cnt; *thread_block_count = thr_blc_cnt; *total_thread_block_count = total_thr_blc_cnt; return; } // **************************************************************************** // Function: runTest // // Purpose: // This benchmark measures the performance of applying QT-clustering on // single precision data. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, OptionParser& op) { int matrix_type = 0x0; if( 0 == comm_get_rank() ){ matrix_type |= GLOBAL_MEMORY; // find out what type of distance matrix we will be using. matrix_type |= COMPACT_STORAGE_MATRIX; } comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0); QTC(name, op, matrix_type); } void QTC(const string& name, OptionParser& op, int matrix_type){ ofstream debug_out, seeds_out; void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust; void *indr_mtrx, *degrees; int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output; bool save_clusters = false; bool be_verbose = false; void *distance_matrix_gmem, *distance_matrix; float *dist_source, *pnts = NULL; float threshold = 1.0f; unsigned long int i; int max_degree, thread_block_count, total_thread_block_count, active_node_count; int cwrank=0, node_count=1, tpb, max_card, iter=0; unsigned long int dst_matrix_elems, point_count, max_point_count; point_count = op.getOptionInt("PointCount"); threshold = op.getOptionFloat("Threshold"); save_clusters = op.getOptionBool("SaveOutput"); be_verbose = op.getOptionBool("Verbose"); // TODO - only deal with this size-switch once int def_size = op.getOptionInt("size"); switch( def_size ) { case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. 
// (i.e., -s 1 is the default) point_count = 4*1024; break; case 2: point_count = 8*1024; break; case 3: point_count = 16*1024; break; case 4: point_count = 16*1024; break; case 5: point_count = 26*1024; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } cwrank = comm_get_rank(); node_count = comm_get_size(); if( cwrank == 0 ){ pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type); } comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0); comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0); dst_matrix_elems = point_count*max_degree; if( cwrank != 0 ){ // For all nodes except zero, in a distributed run. dist_source = (float*) malloc (sizeof(float)*dst_matrix_elems); indr_mtrx_host = (int*) malloc (sizeof(int)*point_count*max_degree); } // If we need to print the actual clusters later on, we'll need to have all points in all nodes. if( save_clusters ){ if( cwrank != 0 ){ pnts = (float *)malloc( 2*point_count*sizeof(float) ); } comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0); } comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0); comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0); assert( max_degree > 0 ); calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); ungrpd_pnts_indr_host = (int*) malloc (sizeof(int)*point_count); for(i=0; i<point_count; i++){ ungrpd_pnts_indr_host[i] = i; } cardinalities = (int*) malloc (sizeof(int)*2); output = (int*) malloc (sizeof(int)*max_degree); allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float)); CHECK_CUDA_ERROR(); // This is the N*Delta indirection matrix allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int)); allocDeviceBuffer(&degrees, point_count*sizeof(int)); allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int)); allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char)); allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float)); allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char)); allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int)); allocDeviceBuffer(&result, point_count*sizeof(int)); #ifdef DEBUG int* cardinalities_debug = (int*) malloc (sizeof(int)*thread_block_count*2); #endif // Copy to device, and record transfer time copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float)); copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int)); copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int)); hipMemset(clustered_pnts_mask, 0, point_count*sizeof(char)); hipMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float)); tpb = ( point_count > THREADSPERBLOCK )? THREADSPERBLOCK : point_count; hipLaunchKernelGGL(( compute_degrees), dim3(thread_block_count), dim3(tpb), 0, 0, (int *)indr_mtrx, (int *)degrees, point_count, max_degree); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); // The names of the saved outputs, if enabled, are "p", "p_seeds", and "p." 
if( 0 == cwrank ){ if( save_clusters ){ debug_out.open("p"); for(i=0; i<point_count; i++){ debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl; } debug_out.close(); seeds_out.open("p_seeds"); } cout << "\nInitial ThreadBlockCount: " << thread_block_count; cout << " PointCount: " << point_count; cout << " Max degree: " << max_degree << "\n" << endl; cout.flush(); } max_point_count = point_count; tpb = THREADSPERBLOCK; distance_matrix = distance_matrix_gmem; // Kernel execution double qtc_time = 0.0, trim_time = 0.0, update_time = 0.0; do{ stringstream ss; int winner_node=-1; int winner_index=-1; bool this_node_participates = true; ++iter; calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); // If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs). if( cwrank >= active_node_count ){ this_node_participates = false; } comm_update_communicator(cwrank, active_node_count); if( !this_node_participates ) break; cwrank = comm_get_rank(); auto start = std::chrono::steady_clock::now(); // Main kernel hipLaunchKernelGGL(( QTC_device), dim3(thread_block_count), dim3(tpb), 0, 0, (float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, cwrank, active_node_count, total_thread_block_count); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); qtc_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); CHECK_CUDA_ERROR(); #ifdef DEBUG printf("cardinalities\n"); copyFromDevice( cardinalities_debug, cardnl, 2*576*sizeof(int) ); for (int i = 0; i < 576*2; i++) printf("%d %d\n", i, cardinalities_debug[i]); #endif if( thread_block_count > 1 ){ // We are reducing 128 numbers or less, so one thread should be sufficient. hipLaunchKernelGGL(( reduce_card_device), dim3(1), dim3(1), 0, 0, (int *)cardnl, thread_block_count); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); } copyFromDevice( cardinalities, cardnl, 2*sizeof(int) ); max_card = cardinalities[0]; winner_index = cardinalities[1]; comm_barrier(); comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1); if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero. cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl; } start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( trim_ungrouped_pnts_indr_array), dim3(1), dim3(tpb), 0, 0, winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix, (int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); end = std::chrono::steady_clock::now(); trim_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); if( cwrank == winner_node){ // for non-parallel cases, these should both be zero. if( save_clusters ){ ss << "p." 
<< iter; debug_out.open(ss.str().c_str()); } copyFromDevice(output, (void *)result, max_card*sizeof(int) ); if( save_clusters ){ for(int i=0; i<max_card; i++){ debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl; } seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl; debug_out.close(); } } start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( update_clustered_pnts_mask), dim3(1), dim3(tpb), 0, 0, (char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); end = std::chrono::steady_clock::now(); update_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); point_count -= max_card; }while( max_card > 1 && point_count ); if( save_clusters ){ seeds_out.close(); } if( cwrank == 0){ cout << "QTC is complete. Clustering iteration count: " << iter << endl; cout << "\nKernel execution time\n"; cout << "qtc: " << qtc_time * 1e-9f << " (s)\n"; cout << "trim: " << trim_time * 1e-9f << " (s)\n"; cout << "update: " << update_time * 1e-9f << " (s)\n"; cout << "total: " << (qtc_time + trim_time + update_time) * 1e-9f << " (s)\n"; cout.flush(); } free(dist_source); free(indr_mtrx_host); free(output); free(cardinalities); #ifdef DEBUG free(cardinalities_debug); #endif freeDeviceBuffer(distance_matrix_gmem); freeDeviceBuffer(indr_mtrx); freeDeviceBuffer(Ai_mask); freeDeviceBuffer(cardnl); freeDeviceBuffer(result); return; } //////////////////////////////////////////////////////////////////////////////// void allocDeviceBuffer(void** bufferp, unsigned long bytes) { hipMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void freeDeviceBuffer(void* buffer) { hipFree(buffer); } void copyToDevice(void* to_device, void* from_host, unsigned long bytes) { hipMemcpy(to_device, from_host, bytes, hipMemcpyHostToDevice); CHECK_CUDA_ERROR(); } void copyFromDevice(void* to_host, void* from_device, unsigned long bytes) { hipMemcpy(to_host, from_device, bytes, hipMemcpyDeviceToHost); CHECK_CUDA_ERROR(); }
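The QTC host loop above times each kernel by bracketing the launch with std::chrono::steady_clock reads and forcing completion with hipDeviceSynchronize() before the second timestamp, accumulating nanoseconds that are later printed as seconds. A minimal sketch of that pattern, with a hypothetical dummy_kernel in place of QTC_device / trim_ungrouped_pnts_indr_array / update_clustered_pnts_mask (not part of the file above):

#include "hip/hip_runtime.h"
#include <chrono>
#include <cstdio>

__global__ void dummy_kernel(int) {}   // hypothetical stand-in for the timed kernels

int main()
{
    double kernel_ns = 0.0;

    auto start = std::chrono::steady_clock::now();
    hipLaunchKernelGGL(dummy_kernel, dim3(1), dim3(1), 0, 0, 0);
    hipDeviceSynchronize();            // without this the interval covers only the (asynchronous) launch
    auto end = std::chrono::steady_clock::now();
    kernel_ns += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();

    printf("kernel: %f (s)\n", kernel_ns * 1e-9);
    return 0;
}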
e45afb9b18b102c0f171973e8873f6aaba7d4e51.cu
#include <math.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <chrono> #include <sstream> #include <iostream> #include <fstream> #include "tuningParameters.h" #include "qtclib.h" #include "OptionParser.h" #include "libdata.h" #include "cudacommon.h" #define _USE_MATH_DEFINES #include <float.h> #include "comm.h" using namespace std; #include "kernels_common.h" #include "kernels_compact_storage.h" // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. The user is allowed to specify // the size of the input data in megabytes if they are not using a // predefined size (i.e. the -s option). // // Arguments: // op: the options parser / parameter database // // Programmer: Anthony Danalis // Creation: February 04, 2011 // Returns: nothing // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op){ op.addOption("PointCount", OPT_INT, "4096", "point count (default: 4096)"); op.addOption("Threshold", OPT_FLOAT, "1", "cluster diameter threshold (default: 1)"); op.addOption("SaveOutput", OPT_BOOL, "", "Save output results in files (default: false)"); op.addOption("Verbose", OPT_BOOL, "", "Print cluster cardinalities (default: false)"); } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Calls single precision and, if viable, double precision QT-Clustering // benchmark. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, OptionParser& op); void RunBenchmark(OptionParser &op){ runTest("QTC", op); } // **************************************************************************** // Function: calculate_participants // // Purpose: // This function decides how many GPUs (up to the maximum requested by the user) // and threadblocks per GPU will be used. It also returns the total number of // thread-blocks across all GPUs and the number of thread-blocks that are in nodes // before the current one. // In the future, the behavior of this function should be decided based on // auto-tuning instead of arbitrary decisions. 
// // Arguments: // The number of nodes requested by the user and the four // variables that the function computes (passed by reference) // // // Returns: nothing // // Programmer: Anthony Danalis // Creation: May 25, 2011 // // **************************************************************************** void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){ int ac_nd_cnt, thr_blc_cnt, total_thr_blc_cnt; ac_nd_cnt = node_count; if( point_count <= (node_count-1) * SM_COUNT * GPU_MIN_SATURATION_FACTOR ){ int K = SM_COUNT * GPU_MIN_SATURATION_FACTOR; ac_nd_cnt = (point_count+K-1) / K; } if( point_count >= ac_nd_cnt * SM_COUNT * OVR_SBSCR_FACTOR ){ thr_blc_cnt = SM_COUNT * OVR_SBSCR_FACTOR; total_thr_blc_cnt = thr_blc_cnt * ac_nd_cnt; }else{ thr_blc_cnt = point_count/ac_nd_cnt; if( cwrank < point_count%ac_nd_cnt ){ thr_blc_cnt++; } total_thr_blc_cnt = point_count; } *active_node_count = ac_nd_cnt; *thread_block_count = thr_blc_cnt; *total_thread_block_count = total_thr_blc_cnt; return; } // **************************************************************************** // Function: runTest // // Purpose: // This benchmark measures the performance of applying QT-clustering on // single precision data. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, OptionParser& op) { int matrix_type = 0x0; if( 0 == comm_get_rank() ){ matrix_type |= GLOBAL_MEMORY; // find out what type of distance matrix we will be using. matrix_type |= COMPACT_STORAGE_MATRIX; } comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0); QTC(name, op, matrix_type); } void QTC(const string& name, OptionParser& op, int matrix_type){ ofstream debug_out, seeds_out; void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust; void *indr_mtrx, *degrees; int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output; bool save_clusters = false; bool be_verbose = false; void *distance_matrix_gmem, *distance_matrix; float *dist_source, *pnts = NULL; float threshold = 1.0f; unsigned long int i; int max_degree, thread_block_count, total_thread_block_count, active_node_count; int cwrank=0, node_count=1, tpb, max_card, iter=0; unsigned long int dst_matrix_elems, point_count, max_point_count; point_count = op.getOptionInt("PointCount"); threshold = op.getOptionFloat("Threshold"); save_clusters = op.getOptionBool("SaveOutput"); be_verbose = op.getOptionBool("Verbose"); // TODO - only deal with this size-switch once int def_size = op.getOptionInt("size"); switch( def_size ) { case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. 
// (i.e., -s 1 is the default) point_count = 4*1024; break; case 2: point_count = 8*1024; break; case 3: point_count = 16*1024; break; case 4: point_count = 16*1024; break; case 5: point_count = 26*1024; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } cwrank = comm_get_rank(); node_count = comm_get_size(); if( cwrank == 0 ){ pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type); } comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0); comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0); dst_matrix_elems = point_count*max_degree; if( cwrank != 0 ){ // For all nodes except zero, in a distributed run. dist_source = (float*) malloc (sizeof(float)*dst_matrix_elems); indr_mtrx_host = (int*) malloc (sizeof(int)*point_count*max_degree); } // If we need to print the actual clusters later on, we'll need to have all points in all nodes. if( save_clusters ){ if( cwrank != 0 ){ pnts = (float *)malloc( 2*point_count*sizeof(float) ); } comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0); } comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0); comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0); assert( max_degree > 0 ); calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); ungrpd_pnts_indr_host = (int*) malloc (sizeof(int)*point_count); for(i=0; i<point_count; i++){ ungrpd_pnts_indr_host[i] = i; } cardinalities = (int*) malloc (sizeof(int)*2); output = (int*) malloc (sizeof(int)*max_degree); allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float)); CHECK_CUDA_ERROR(); // This is the N*Delta indirection matrix allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int)); allocDeviceBuffer(&degrees, point_count*sizeof(int)); allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int)); allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char)); allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float)); allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char)); allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int)); allocDeviceBuffer(&result, point_count*sizeof(int)); #ifdef DEBUG int* cardinalities_debug = (int*) malloc (sizeof(int)*thread_block_count*2); #endif // Copy to device, and record transfer time copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float)); copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int)); copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int)); cudaMemset(clustered_pnts_mask, 0, point_count*sizeof(char)); cudaMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float)); tpb = ( point_count > THREADSPERBLOCK )? THREADSPERBLOCK : point_count; compute_degrees<<<thread_block_count, tpb>>>((int *)indr_mtrx, (int *)degrees, point_count, max_degree); cudaDeviceSynchronize(); CHECK_CUDA_ERROR(); // The names of the saved outputs, if enabled, are "p", "p_seeds", and "p." 
if( 0 == cwrank ){ if( save_clusters ){ debug_out.open("p"); for(i=0; i<point_count; i++){ debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl; } debug_out.close(); seeds_out.open("p_seeds"); } cout << "\nInitial ThreadBlockCount: " << thread_block_count; cout << " PointCount: " << point_count; cout << " Max degree: " << max_degree << "\n" << endl; cout.flush(); } max_point_count = point_count; tpb = THREADSPERBLOCK; distance_matrix = distance_matrix_gmem; // Kernel execution double qtc_time = 0.0, trim_time = 0.0, update_time = 0.0; do{ stringstream ss; int winner_node=-1; int winner_index=-1; bool this_node_participates = true; ++iter; calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); // If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs). if( cwrank >= active_node_count ){ this_node_participates = false; } comm_update_communicator(cwrank, active_node_count); if( !this_node_participates ) break; cwrank = comm_get_rank(); auto start = std::chrono::steady_clock::now(); // Main kernel QTC_device<<<thread_block_count, tpb>>>((float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, cwrank, active_node_count, total_thread_block_count); cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); qtc_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); CHECK_CUDA_ERROR(); #ifdef DEBUG printf("cardinalities\n"); copyFromDevice( cardinalities_debug, cardnl, 2*576*sizeof(int) ); for (int i = 0; i < 576*2; i++) printf("%d %d\n", i, cardinalities_debug[i]); #endif if( thread_block_count > 1 ){ // We are reducing 128 numbers or less, so one thread should be sufficient. reduce_card_device<<<1, 1>>>((int *)cardnl, thread_block_count); cudaDeviceSynchronize(); CHECK_CUDA_ERROR(); } copyFromDevice( cardinalities, cardnl, 2*sizeof(int) ); max_card = cardinalities[0]; winner_index = cardinalities[1]; comm_barrier(); comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1); if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero. cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl; } start = std::chrono::steady_clock::now(); trim_ungrouped_pnts_indr_array<<<1, tpb>>>(winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix, (int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold); cudaDeviceSynchronize(); CHECK_CUDA_ERROR(); end = std::chrono::steady_clock::now(); trim_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); if( cwrank == winner_node){ // for non-parallel cases, these should both be zero. if( save_clusters ){ ss << "p." 
<< iter; debug_out.open(ss.str().c_str()); } copyFromDevice(output, (void *)result, max_card*sizeof(int) ); if( save_clusters ){ for(int i=0; i<max_card; i++){ debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl; } seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl; debug_out.close(); } } start = std::chrono::steady_clock::now(); update_clustered_pnts_mask<<<1, tpb>>>((char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count); cudaDeviceSynchronize(); CHECK_CUDA_ERROR(); end = std::chrono::steady_clock::now(); update_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); point_count -= max_card; }while( max_card > 1 && point_count ); if( save_clusters ){ seeds_out.close(); } if( cwrank == 0){ cout << "QTC is complete. Clustering iteration count: " << iter << endl; cout << "\nKernel execution time\n"; cout << "qtc: " << qtc_time * 1e-9f << " (s)\n"; cout << "trim: " << trim_time * 1e-9f << " (s)\n"; cout << "update: " << update_time * 1e-9f << " (s)\n"; cout << "total: " << (qtc_time + trim_time + update_time) * 1e-9f << " (s)\n"; cout.flush(); } free(dist_source); free(indr_mtrx_host); free(output); free(cardinalities); #ifdef DEBUG free(cardinalities_debug); #endif freeDeviceBuffer(distance_matrix_gmem); freeDeviceBuffer(indr_mtrx); freeDeviceBuffer(Ai_mask); freeDeviceBuffer(cardnl); freeDeviceBuffer(result); return; } //////////////////////////////////////////////////////////////////////////////// void allocDeviceBuffer(void** bufferp, unsigned long bytes) { cudaMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void freeDeviceBuffer(void* buffer) { cudaFree(buffer); } void copyToDevice(void* to_device, void* from_host, unsigned long bytes) { cudaMemcpy(to_device, from_host, bytes, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); } void copyFromDevice(void* to_host, void* from_device, unsigned long bytes) { cudaMemcpy(to_host, from_device, bytes, cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(); }
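// ---------------------------------------------------------------------------
// Editor's sketch (not part of the QTC benchmark sources above). runTest()
// accumulates qtc_time / trim_time / update_time by reading
// std::chrono::steady_clock around each launch and synchronizing before the
// second read, because kernel launches return immediately. The self-contained
// program below shows just that timing pattern; "dummy_phase" is a
// hypothetical stand-in for QTC_device, trim_ungrouped_pnts_indr_array and
// update_clustered_pnts_mask.
// ---------------------------------------------------------------------------
#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_phase(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = 0.5f * x[i] + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float *d_x = NULL;
    cudaMalloc((void **)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    double phase_ns = 0.0;                   // accumulated like qtc_time above
    for (int iter = 0; iter < 10; ++iter) {
        auto start = std::chrono::steady_clock::now();
        dummy_phase<<<(n + 255) / 256, 256>>>(d_x, n);
        cudaDeviceSynchronize();             // the launch is asynchronous; sync before stopping the clock
        auto end = std::chrono::steady_clock::now();
        phase_ns += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    }

    printf("phase total: %f (s)\n", phase_ns * 1e-9);
    cudaFree(d_x);
    return 0;
}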
9103a44bc753ae98bb7c4da88b3a70a2d853d86f.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "util.hpp" #include "cuda_stream.hpp" #include "cuda_event.hpp" template <int Threads> __global__ void blur_shared(const double *in, double* out, int n) { __shared__ double buffer[Threads+2]; auto block_start = blockDim.x * blockIdx.x; auto li = threadIdx.x + 1; auto gi = li + block_start; if(gi<n-1) { // load shared memory buffer[li] = in[gi]; if(li==1) { buffer[0] = in[block_start]; buffer[Threads+1] = in[block_start+Threads+1]; } __syncthreads(); out[gi] = 0.25*(buffer[li-1] + 2.0*buffer[li] + buffer[li+1]); } } __global__ void blur_shared_block(const double *in, double* out, int n) { extern __shared__ double buffer[]; auto i = threadIdx.x + 1; if(i<n-1) { // load shared memory buffer[i] = in[i]; if(i==1) { buffer[0] = in[0]; buffer[n] = in[n]; } __syncthreads(); out[i] = 0.25*(buffer[i-1] + 2.0*buffer[i] + buffer[i+1]); } } __global__ void blur(const double *in, double* out, int n) { auto i = threadIdx.x + blockDim.x * blockIdx.x + 1; if(i<n-1) { out[i] = 0.25*(in[i-1] + 2.0*in[i] + in[i+1]); } } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 20); size_t nsteps = read_arg(argc, argv, 2, 100); bool use_shared = read_arg(argc, argv, 3, false); size_t n = 1 << pow; const auto size_in_bytes = n * sizeof(double); std::cout << "-- blur 1D test of length n = " << n << " : " << size_in_bytes*1e-9 << "MB\n"; std::cout << "-- using " << (use_shared ? "shared": "direct") << " kernel\n"; auto x_host = malloc_host<double>(n+2, 0.); // set boundary conditions to 1 x_host[0] = 1.0; x_host[n+1] = 1.0; auto x0 = malloc_device<double>(n+2); auto x1 = malloc_device<double>(n+2); // copy initial conditions to device copy_to_device<double>(x_host, x0, n+2); copy_to_device<double>(x_host, x1, n+2); // find the launch grid configuration constexpr auto block_dim = 128; const auto grid_dim = (n+(block_dim-1))/block_dim; if (use_shared) hipLaunchKernelGGL(( blur_shared<block_dim>), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, n); else hipLaunchKernelGGL(( blur), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, n); cuda_stream stream; auto start_event = stream.enqueue_event(); for(auto step=0; step<nsteps; ++step) { if (use_shared) { hipLaunchKernelGGL(( blur_shared<block_dim>), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, n); } else { hipLaunchKernelGGL(( blur), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, n); } std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); // copy result back to host copy_to_host<double>(x0, x_host, n+2); stop_event.wait(); auto time = stop_event.time_since(start_event); std::cout << "==== " << time << " seconds : " << 1e3*time/nsteps << " ms/step\n"; return 0; }
9103a44bc753ae98bb7c4da88b3a70a2d853d86f.cu
#include <iostream> #include <cuda.h> #include "util.hpp" #include "cuda_stream.hpp" #include "cuda_event.hpp" template <int Threads> __global__ void blur_shared(const double *in, double* out, int n) { __shared__ double buffer[Threads+2]; auto block_start = blockDim.x * blockIdx.x; auto li = threadIdx.x + 1; auto gi = li + block_start; if(gi<n-1) { // load shared memory buffer[li] = in[gi]; if(li==1) { buffer[0] = in[block_start]; buffer[Threads+1] = in[block_start+Threads+1]; } __syncthreads(); out[gi] = 0.25*(buffer[li-1] + 2.0*buffer[li] + buffer[li+1]); } } __global__ void blur_shared_block(const double *in, double* out, int n) { extern __shared__ double buffer[]; auto i = threadIdx.x + 1; if(i<n-1) { // load shared memory buffer[i] = in[i]; if(i==1) { buffer[0] = in[0]; buffer[n] = in[n]; } __syncthreads(); out[i] = 0.25*(buffer[i-1] + 2.0*buffer[i] + buffer[i+1]); } } __global__ void blur(const double *in, double* out, int n) { auto i = threadIdx.x + blockDim.x * blockIdx.x + 1; if(i<n-1) { out[i] = 0.25*(in[i-1] + 2.0*in[i] + in[i+1]); } } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 20); size_t nsteps = read_arg(argc, argv, 2, 100); bool use_shared = read_arg(argc, argv, 3, false); size_t n = 1 << pow; const auto size_in_bytes = n * sizeof(double); std::cout << "-- blur 1D test of length n = " << n << " : " << size_in_bytes*1e-9 << "MB\n"; std::cout << "-- using " << (use_shared ? "shared": "direct") << " kernel\n"; auto x_host = malloc_host<double>(n+2, 0.); // set boundary conditions to 1 x_host[0] = 1.0; x_host[n+1] = 1.0; auto x0 = malloc_device<double>(n+2); auto x1 = malloc_device<double>(n+2); // copy initial conditions to device copy_to_device<double>(x_host, x0, n+2); copy_to_device<double>(x_host, x1, n+2); // find the launch grid configuration constexpr auto block_dim = 128; const auto grid_dim = (n+(block_dim-1))/block_dim; if (use_shared) blur_shared<block_dim><<<grid_dim, block_dim>>>(x0, x1, n); else blur<<<grid_dim, block_dim>>>(x0, x1, n); cuda_stream stream; auto start_event = stream.enqueue_event(); for(auto step=0; step<nsteps; ++step) { if (use_shared) { blur_shared<block_dim><<<grid_dim, block_dim>>>(x0, x1, n); } else { blur<<<grid_dim, block_dim>>>(x0, x1, n); } std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); // copy result back to host copy_to_host<double>(x0, x_host, n+2); stop_event.wait(); auto time = stop_event.time_since(start_event); std::cout << "==== " << time << " seconds : " << 1e3*time/nsteps << " ms/step\n"; return 0; }
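// ---------------------------------------------------------------------------
// Editor's sketch (not part of the blur pair above). Both blur kernels apply
// the same three-point stencil, out[i] = 0.25*(in[i-1] + 2*in[i] + in[i+1]),
// with the two boundary cells held fixed, so a plain host implementation of
// the stencil is handy for validating whatever x_host contains after the
// final copy_to_host. blur_reference is the editor's name, and the indexing
// here is the usual "touch everything but the two end cells" convention
// rather than the kernels' exact n / n+2 bookkeeping.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

static void blur_reference(const std::vector<double> &in, std::vector<double> &out)
{
    out = in;                                     // boundary cells stay untouched
    for (std::size_t i = 1; i + 1 < in.size(); ++i)
        out[i] = 0.25 * (in[i - 1] + 2.0 * in[i] + in[i + 1]);
}

int main()
{
    const std::size_t n = 8;                      // tiny interior for a quick eyeball check
    std::vector<double> x0(n + 2, 0.0), x1;
    x0.front() = 1.0;                             // same boundary conditions as the device code
    x0.back()  = 1.0;

    for (int step = 0; step < 100; ++step) {
        blur_reference(x0, x1);
        std::swap(x0, x1);
    }

    for (double v : x0) printf("%.6f ", v);
    printf("\n");
    return 0;
}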
ccc90c7239d1451524d3184634ed85e92e7e4e57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * \brief gemm: C = A * B. */ #include "cuda_util.h" // Initialize the input data. void GenMatrix(const int height, const int width, float *mat) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { mat[i*width + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX*RAND_MAX); } } } // Just for checking the result. float GetMean(const float* mat, const int height, const int width) { int num = height * width; float total = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { total += mat[i*width + j]; } } return total / num; } // Just for checking the result too. void MatrixPrint(const float* mat, const int height, const int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { std::cout << mat[i*width + j] << ","; } std::cout << std::endl; } } // CPU version 1: 1583 ms // Normal version in cpu as a reference void MatrixMulCPUv1(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { int i, j, k; memset(C, 0, sizeof(float) * ldc * M); for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } // CPU version 2: 3389 ms // Block based matrix multiplication in cpu. void MatrixMulCPUv2(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { int bi, bj, bk; int i, j, k; const int block_size = 32; int block_num_M = M / block_size; int block_num_N = N / block_size; int block_num_K = K / block_size; memset(C, 0, sizeof(float) * ldc * M); // Loop over all of the blocks. for (bi = 0; bi < block_num_M; ++bi) { for (bj = 0; bj < block_num_N; ++bj) { for (bk = 0; bk < block_num_K; ++bk) { // Loop over all of the elements in a block. for (i = bi*block_size; i < (bi + 1)*block_size; ++i) { for (k = bk*block_size; k < (bk + 1)*block_size; ++k) { for (j = bj*block_size; j < (bj + 1)*block_size; ++j) { C[i*ldc + j] += A[i*lda + k] * B[k*ldb + j]; } } } } } } } // CUDA version 1: 72 ms // It is rewrited from MatrixMulCPUv2. // bi,bj can be replaced by blockIdx.x,blockIdx.y // i,j can be replaced by threadIdx.x,threadIdx.y // so just bk and k left. Grid and block is related to the dst matrix. // // \ C[ty, tx] = A[ty, k] * B[k, tx] // for bk -> bk_num_per_grid // for k -> k_num_per_block // C[bi*bs + ty, bj*bs + tx] = A[bi*bs + ty, bk*bs + k] * B[k*bs + k, bj*bs + tx] template <int BLOCK_SIZE> __global__ void MatrixMulKernelv1(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { float c_sub_acc = 0; for (int bk = 0; bk < K / BLOCK_SIZE; bk++) { for (int k = 0;k < BLOCK_SIZE; k++) { c_sub_acc += A[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * lda + (bk * BLOCK_SIZE + k)] * B[(bk * BLOCK_SIZE + k) * ldb + (blockIdx.x * BLOCK_SIZE + threadIdx.x)]; } } C[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * ldc + (blockIdx.x * BLOCK_SIZE + threadIdx.x)] += c_sub_acc; } // CUDA version 2. // Use shared memory. 
template <int BLOCK_SIZE> __global__ void MatrixMulKernelv2(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { __shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE]; float c_sub_acc = 0; // For blocks in grid. for (int bk = 0; bk < K / BLOCK_SIZE; bk++) { a_shared[threadIdx.y][threadIdx.x] = A[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * lda + (bk * BLOCK_SIZE + threadIdx.x)]; b_shared[threadIdx.y][threadIdx.x] = B[(bk * BLOCK_SIZE + threadIdx.y) * ldb + (blockIdx.x * BLOCK_SIZE + threadIdx.x)]; // Wait for data to complete loading to Shared memory. __syncthreads(); // For elements in a block. for (int k = 0;k < BLOCK_SIZE; k++) { c_sub_acc += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x]; } // To prevent the case from happening: // The next round of data is loaded when the data in share memory is not used up. __syncthreads(); } C[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * ldc + (blockIdx.x * BLOCK_SIZE + threadIdx.x)] += c_sub_acc; } //#define TEST_CUDA_V1 float MatrixMulCUDA(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { cjmcv_cuda_util::GpuTimer gpu_timer; const int block_size = 32; dim3 threads_per_block(block_size, block_size); dim3 blocks_per_grid(N / threads_per_block.x, M / threads_per_block.y); // Warm up. MatrixMulKernelv1<block_size> << <blocks_per_grid, threads_per_block >> > (M, N, K, 1.0, A, lda, B, ldb, C, ldc); hipMemset(C, 0, sizeof(float) * M * N); // Record the start event gpu_timer.Start(); #ifdef TEST_CUDA_V1 MatrixMulKernelv1<block_size> << <blocks_per_grid, threads_per_block >> > (M, N, K, 1.0, A, lda, B, ldb, C, ldc); #else MatrixMulKernelv2<block_size> << <blocks_per_grid, threads_per_block >> > (M, N, K, 1.0, A, lda, B, ldb, C, ldc); #endif // Record the stop event gpu_timer.Stop(); return gpu_timer.ElapsedMillis(); } int main() { int ret = cjmcv_cuda_util::InitEnvironment(0); if (ret != 0) { printf("Failed to initialize the environment for cuda."); return -1; } int height_a = 2560, width_a = 800; int height_b = 800, width_b = 3200; if (width_a != height_b) { printf("width_a should be equal to height_b.\n"); return 1; } const int mem_size_a = sizeof(float) * height_a * width_a; const int mem_size_b = sizeof(float) * height_b * width_b; const int mem_size_c = sizeof(float) * height_a * width_b; float *h_a = (float *)malloc(mem_size_a); float *h_b = (float *)malloc(mem_size_b); float *h_c = (float *)malloc(mem_size_c); if (h_a == NULL || h_b == NULL || h_c == NULL) { printf("Fail to malloc.\n"); return 1; } // Initialize srand(0); GenMatrix(height_a, width_a, h_a); GenMatrix(height_b, width_b, h_b); // CPU time_t t = clock(); MatrixMulCPUv1(height_a, width_b, width_a, 1.0, h_a, width_a,h_b, width_b, h_c, width_b); printf("In cpu version 1, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b)); //MatrixPrint(h_c, height_a, width_b); t = clock(); MatrixMulCPUv2(height_a, width_b, width_a, 1.0, h_a, width_a, h_b, width_b, h_c, width_b); printf("In cpu version 2, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b)); //MatrixPrint(h_c, height_a, width_b); // GPU // Allocate memory in host. 
float msec_total; float *d_a, *d_b, *d_c; CUDA_CHECK(hipMalloc((void **)&d_a, mem_size_a)); CUDA_CHECK(hipMalloc((void **)&d_b, mem_size_b)); CUDA_CHECK(hipMalloc((void **)&d_c, mem_size_c)); // Copy host memory to device CUDA_CHECK(hipMemcpy(d_a, h_a, mem_size_a, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(d_b, h_b, mem_size_b, hipMemcpyHostToDevice)); msec_total = MatrixMulCUDA(height_a, width_b, width_a, 1.0, d_a, width_a, d_b, width_b, d_c, width_b); // Copy memory back to host. CUDA_CHECK(hipMemcpy(h_c, d_c, mem_size_c, hipMemcpyDeviceToHost)); printf("In gpu version 1, msec_total = %f, mean = %f\n", msec_total, GetMean(h_c, height_a, width_b)); //MatrixPrint(h_c, height_a, width_b); free(h_a); free(h_b); free(h_c); hipFree(d_a); hipFree(d_b); hipFree(d_c); cjmcv_cuda_util::CleanUpEnvironment(); return 0; }
ccc90c7239d1451524d3184634ed85e92e7e4e57.cu
/*! * \brief gemm: C = A * B. */ #include "cuda_util.h" // Initialize the input data. void GenMatrix(const int height, const int width, float *mat) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { mat[i*width + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX*RAND_MAX); } } } // Just for checking the result. float GetMean(const float* mat, const int height, const int width) { int num = height * width; float total = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { total += mat[i*width + j]; } } return total / num; } // Just for checking the result too. void MatrixPrint(const float* mat, const int height, const int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { std::cout << mat[i*width + j] << ","; } std::cout << std::endl; } } // CPU version 1: 1583 ms // Normal version in cpu as a reference void MatrixMulCPUv1(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { int i, j, k; memset(C, 0, sizeof(float) * ldc * M); for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA*A[i*lda + k]; for (j = 0; j < N; ++j) { C[i*ldc + j] += A_PART*B[k*ldb + j]; } } } } // CPU version 2: 3389 ms // Block based matrix multiplication in cpu. void MatrixMulCPUv2(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { int bi, bj, bk; int i, j, k; const int block_size = 32; int block_num_M = M / block_size; int block_num_N = N / block_size; int block_num_K = K / block_size; memset(C, 0, sizeof(float) * ldc * M); // Loop over all of the blocks. for (bi = 0; bi < block_num_M; ++bi) { for (bj = 0; bj < block_num_N; ++bj) { for (bk = 0; bk < block_num_K; ++bk) { // Loop over all of the elements in a block. for (i = bi*block_size; i < (bi + 1)*block_size; ++i) { for (k = bk*block_size; k < (bk + 1)*block_size; ++k) { for (j = bj*block_size; j < (bj + 1)*block_size; ++j) { C[i*ldc + j] += A[i*lda + k] * B[k*ldb + j]; } } } } } } } // CUDA version 1: 72 ms // It is rewrited from MatrixMulCPUv2. // bi,bj can be replaced by blockIdx.x,blockIdx.y // i,j can be replaced by threadIdx.x,threadIdx.y // so just bk and k left. Grid and block is related to the dst matrix. // // \ C[ty, tx] = A[ty, k] * B[k, tx] // for bk -> bk_num_per_grid // for k -> k_num_per_block // C[bi*bs + ty, bj*bs + tx] = A[bi*bs + ty, bk*bs + k] * B[k*bs + k, bj*bs + tx] template <int BLOCK_SIZE> __global__ void MatrixMulKernelv1(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { float c_sub_acc = 0; for (int bk = 0; bk < K / BLOCK_SIZE; bk++) { for (int k = 0;k < BLOCK_SIZE; k++) { c_sub_acc += A[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * lda + (bk * BLOCK_SIZE + k)] * B[(bk * BLOCK_SIZE + k) * ldb + (blockIdx.x * BLOCK_SIZE + threadIdx.x)]; } } C[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * ldc + (blockIdx.x * BLOCK_SIZE + threadIdx.x)] += c_sub_acc; } // CUDA version 2. // Use shared memory. template <int BLOCK_SIZE> __global__ void MatrixMulKernelv2(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { __shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE]; float c_sub_acc = 0; // For blocks in grid. 
for (int bk = 0; bk < K / BLOCK_SIZE; bk++) { a_shared[threadIdx.y][threadIdx.x] = A[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * lda + (bk * BLOCK_SIZE + threadIdx.x)]; b_shared[threadIdx.y][threadIdx.x] = B[(bk * BLOCK_SIZE + threadIdx.y) * ldb + (blockIdx.x * BLOCK_SIZE + threadIdx.x)]; // Wait for data to complete loading to Shared memory. __syncthreads(); // For elements in a block. for (int k = 0;k < BLOCK_SIZE; k++) { c_sub_acc += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x]; } // To prevent the case from happening: // The next round of data is loaded when the data in share memory is not used up. __syncthreads(); } C[(blockIdx.y * BLOCK_SIZE + threadIdx.y) * ldc + (blockIdx.x * BLOCK_SIZE + threadIdx.x)] += c_sub_acc; } //#define TEST_CUDA_V1 float MatrixMulCUDA(const int M, const int N, const int K, const float ALPHA, const float *A, const int lda, const float *B, const int ldb, float *C, const int ldc) { cjmcv_cuda_util::GpuTimer gpu_timer; const int block_size = 32; dim3 threads_per_block(block_size, block_size); dim3 blocks_per_grid(N / threads_per_block.x, M / threads_per_block.y); // Warm up. MatrixMulKernelv1<block_size> << <blocks_per_grid, threads_per_block >> > (M, N, K, 1.0, A, lda, B, ldb, C, ldc); cudaMemset(C, 0, sizeof(float) * M * N); // Record the start event gpu_timer.Start(); #ifdef TEST_CUDA_V1 MatrixMulKernelv1<block_size> << <blocks_per_grid, threads_per_block >> > (M, N, K, 1.0, A, lda, B, ldb, C, ldc); #else MatrixMulKernelv2<block_size> << <blocks_per_grid, threads_per_block >> > (M, N, K, 1.0, A, lda, B, ldb, C, ldc); #endif // Record the stop event gpu_timer.Stop(); return gpu_timer.ElapsedMillis(); } int main() { int ret = cjmcv_cuda_util::InitEnvironment(0); if (ret != 0) { printf("Failed to initialize the environment for cuda."); return -1; } int height_a = 2560, width_a = 800; int height_b = 800, width_b = 3200; if (width_a != height_b) { printf("width_a should be equal to height_b.\n"); return 1; } const int mem_size_a = sizeof(float) * height_a * width_a; const int mem_size_b = sizeof(float) * height_b * width_b; const int mem_size_c = sizeof(float) * height_a * width_b; float *h_a = (float *)malloc(mem_size_a); float *h_b = (float *)malloc(mem_size_b); float *h_c = (float *)malloc(mem_size_c); if (h_a == NULL || h_b == NULL || h_c == NULL) { printf("Fail to malloc.\n"); return 1; } // Initialize srand(0); GenMatrix(height_a, width_a, h_a); GenMatrix(height_b, width_b, h_b); // CPU time_t t = clock(); MatrixMulCPUv1(height_a, width_b, width_a, 1.0, h_a, width_a,h_b, width_b, h_c, width_b); printf("In cpu version 1, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b)); //MatrixPrint(h_c, height_a, width_b); t = clock(); MatrixMulCPUv2(height_a, width_b, width_a, 1.0, h_a, width_a, h_b, width_b, h_c, width_b); printf("In cpu version 2, msec_total = %lld, mean = %f\n", clock() - t, GetMean(h_c, height_a, width_b)); //MatrixPrint(h_c, height_a, width_b); // GPU // Allocate memory in host. float msec_total; float *d_a, *d_b, *d_c; CUDA_CHECK(cudaMalloc((void **)&d_a, mem_size_a)); CUDA_CHECK(cudaMalloc((void **)&d_b, mem_size_b)); CUDA_CHECK(cudaMalloc((void **)&d_c, mem_size_c)); // Copy host memory to device CUDA_CHECK(cudaMemcpy(d_a, h_a, mem_size_a, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(d_b, h_b, mem_size_b, cudaMemcpyHostToDevice)); msec_total = MatrixMulCUDA(height_a, width_b, width_a, 1.0, d_a, width_a, d_b, width_b, d_c, width_b); // Copy memory back to host. 
CUDA_CHECK(cudaMemcpy(h_c, d_c, mem_size_c, cudaMemcpyDeviceToHost)); printf("In gpu version 1, msec_total = %f, mean = %f\n", msec_total, GetMean(h_c, height_a, width_b)); //MatrixPrint(h_c, height_a, width_b); free(h_a); free(h_b); free(h_c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cjmcv_cuda_util::CleanUpEnvironment(); return 0; }
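// ---------------------------------------------------------------------------
// Editor's sketch (not part of the gemm pair above). MatrixMulKernelv1/v2 loop
// bk over K / BLOCK_SIZE and read whole BLOCK_SIZE x BLOCK_SIZE tiles, so M, N
// and K are assumed to be multiples of BLOCK_SIZE; the sample sizes 2560, 800
// and 3200 all are for BLOCK_SIZE = 32. The snippet below works out the static
// shared-memory footprint of the tiled kernel and shows a hypothetical
// round_up helper a caller could use to size zero-padded buffers for arbitrary
// shapes (round_up is not provided by cuda_util.h).
// ---------------------------------------------------------------------------
#include <cstdio>

static int round_up(int x, int multiple)
{
    return ((x + multiple - 1) / multiple) * multiple;
}

int main()
{
    const int block_size = 32;

    // MatrixMulKernelv2 keeps two float tiles in shared memory per block:
    // 2 * 32 * 32 * 4 bytes = 8 KiB, comfortably under the per-block limit.
    const int smem_bytes = 2 * block_size * block_size * (int)sizeof(float);
    printf("shared memory per block: %d bytes\n", smem_bytes);

    // Hypothetical non-aligned sizes rounded up to the next tile boundary.
    const int M = 2500, N = 3150, K = 810;
    printf("padded sizes: M=%d N=%d K=%d\n",
           round_up(M, block_size), round_up(N, block_size), round_up(K, block_size));
    return 0;
}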
287f802637d6f2f28e91af3335255ad66cf780e0.hip
// !!! This is a file automatically generated by hipify!!! /* CUDA blur * Kevin Yuh, 2014 */ #include <cstdio> #include <hip/hip_runtime.h> #include <hipfft.h> #include "fft_convolve.cuh" /* Atomic-max function. You may find it useful for normalization. We haven't really talked about this yet, but __device__ functions not only are run on the GPU, but are called from within a kernel. Source: http://stackoverflow.com/questions/17399119/ cant-we-use-atomic-operations-for-floating-point-variables-in-cuda */ __device__ static float atomicMax(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __global__ void cudaProdScaleKernel(const hipfftComplex *raw_data, const hipfftComplex *impulse_v, hipfftComplex *out_data, int padded_length) { /* TODO: Implement the point-wise multiplication and scaling for the FFT'd input and impulse response. Recall that these are complex numbers, so you'll need to use the appropriate rule for multiplying them. Also remember to scale by the padded length of the signal (see the notes for Question 1). As in Assignment 1 and Week 1, remember to make your implementation resilient to varying numbers of threads. */ int tid = blockIdx.x*blockDim.x + threadIdx.x; while (tid < padded_length){ hipfftComplex in = raw_data[tid]; hipfftComplex impulse = impulse_v[tid]; out_data[tid].x = (in.x*impulse.x - in.y*impulse.y)/padded_length; out_data[tid].y = (in.x*impulse.y + in.y*impulse.x)/padded_length; tid += blockDim.x*gridDim.x; } } __global__ void cudaMaximumKernel(hipfftComplex *out_data, float *max_abs_val, int padded_length) { /* TODO 2: Implement the maximum-finding and subsequent normalization (dividing by maximum). There are many ways to do this reduction, and some methods have much better performance than others. For this section: Please explain your approach to the reduction, including why you chose the optimizations you did (especially as they relate to GPU hardware). You'll likely find the above atomicMax function helpful. (CUDA's atomicMax function doesn't work for floating-point values.) It's based on two principles: 1) From Week 2, any atomic function can be implemented using atomic compare-and-swap. 2) One can "represent" floating-point values as integers in a way that preserves comparison, if the sign of the two values is the same. (see http://stackoverflow.com/questions/ 29596797/can-the-return-value-of-float-as-int-be-used-to- compare-float-in-cuda) */ // Thread index for accessing global memory int indx = blockIdx.x*blockDim.x + threadIdx.x; // Thread index for accessing shared memory made const for speedup const int tid = threadIdx.x; // Shared memory allocated dynamically from kernal call extern __shared__ float sdata[]; // Pull (real) data into shared memory // Memory is coalleced sdata[tid] = abs(out_data[indx].x); indx += gridDim.x*blockDim.x; while (indx < padded_length){ // if there are more data values than total threads, sdata[tid] = max(abs(out_data[indx].x), sdata[tid]); // use max to avoid if statement indx += gridDim.x*blockDim.x; } __syncthreads(); // make sure shared memory is ready for (int s= blockDim.x/2; s > 0; s>>=1){ /* This implementation uses the tecnique of sequential addressing. Each thread is responsible for finding the max between the data at tid and tid+s. 
This approach allows us to avoid bank conflicts since the stride is always 1 */ if(tid < s){ // s = 16, 8, 4, 2, 1 // here each thread finds the max between the data at tid and // and address on the "other side", a distance of s away. sdata[tid] = max(sdata[tid+s],sdata[tid]); } __syncthreads(); } // In emperical tests, unrolling the loop did NOT result in speed gains // if (tid < 16){ // sdata[tid] = max(sdata[tid+16],sdata[tid]); // sdata[tid] = max(sdata[tid+8],sdata[tid]); // sdata[tid] = max(sdata[tid+4],sdata[tid]); // sdata[tid] = max(sdata[tid+2],sdata[tid]); // sdata[tid] = max(sdata[tid+1],sdata[tid]); // } // __syncthreads(); // atomicMax is used by each thread to compare the value of the data point // at the first index (i.e. the max for the particular warp) with the current // maximum value in global memory. if (tid == 0) atomicMax(max_abs_val, sdata[0]); } __global__ void cudaDivideKernel(hipfftComplex *out_data, float *max_abs_val, int padded_length) { /* TODO 2: Implement the division kernel. Divide all data by the value pointed to by max_abs_val. This kernel should be quite short. */ int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < padded_length){ out_data[tid].x /= *max_abs_val; out_data[tid].y /= *max_abs_val; tid += gridDim.x*blockDim.x; } } void cudaCallProdScaleKernel(const unsigned int blocks, const unsigned int threadsPerBlock, const hipfftComplex *raw_data, const hipfftComplex *impulse_v, hipfftComplex *out_data, const unsigned int padded_length) { /* TODO: Call the element-wise product and scaling kernel. */ hipLaunchKernelGGL(( cudaProdScaleKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, impulse_v, out_data, padded_length); } void cudaCallMaximumKernel(const unsigned int blocks, const unsigned int threadsPerBlock, hipfftComplex *out_data, float *max_abs_val, const unsigned int padded_length) { /* TODO 2: Call the max-finding kernel. */ hipLaunchKernelGGL(( cudaMaximumKernel), dim3(blocks), dim3(threadsPerBlock), threadsPerBlock*sizeof(float), 0, out_data, max_abs_val, padded_length); // Dynamic shared memory allocation? // Do we need anothe array for } void cudaCallDivideKernel(const unsigned int blocks, const unsigned int threadsPerBlock, hipfftComplex *out_data, float *max_abs_val, const unsigned int padded_length) { /* TODO 2: Call the division kernel. */ hipLaunchKernelGGL(( cudaDivideKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, out_data, max_abs_val, padded_length); }
287f802637d6f2f28e91af3335255ad66cf780e0.cu
/* CUDA blur * Kevin Yuh, 2014 */ #include <cstdio> #include <cuda_runtime.h> #include <cufft.h> #include "fft_convolve.cuh" /* Atomic-max function. You may find it useful for normalization. We haven't really talked about this yet, but __device__ functions not only are run on the GPU, but are called from within a kernel. Source: http://stackoverflow.com/questions/17399119/ cant-we-use-atomic-operations-for-floating-point-variables-in-cuda */ __device__ static float atomicMax(float* address, float val) { int* address_as_i = (int*) address; int old = *address_as_i, assumed; do { assumed = old; old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __global__ void cudaProdScaleKernel(const cufftComplex *raw_data, const cufftComplex *impulse_v, cufftComplex *out_data, int padded_length) { /* TODO: Implement the point-wise multiplication and scaling for the FFT'd input and impulse response. Recall that these are complex numbers, so you'll need to use the appropriate rule for multiplying them. Also remember to scale by the padded length of the signal (see the notes for Question 1). As in Assignment 1 and Week 1, remember to make your implementation resilient to varying numbers of threads. */ int tid = blockIdx.x*blockDim.x + threadIdx.x; while (tid < padded_length){ cufftComplex in = raw_data[tid]; cufftComplex impulse = impulse_v[tid]; out_data[tid].x = (in.x*impulse.x - in.y*impulse.y)/padded_length; out_data[tid].y = (in.x*impulse.y + in.y*impulse.x)/padded_length; tid += blockDim.x*gridDim.x; } } __global__ void cudaMaximumKernel(cufftComplex *out_data, float *max_abs_val, int padded_length) { /* TODO 2: Implement the maximum-finding and subsequent normalization (dividing by maximum). There are many ways to do this reduction, and some methods have much better performance than others. For this section: Please explain your approach to the reduction, including why you chose the optimizations you did (especially as they relate to GPU hardware). You'll likely find the above atomicMax function helpful. (CUDA's atomicMax function doesn't work for floating-point values.) It's based on two principles: 1) From Week 2, any atomic function can be implemented using atomic compare-and-swap. 2) One can "represent" floating-point values as integers in a way that preserves comparison, if the sign of the two values is the same. (see http://stackoverflow.com/questions/ 29596797/can-the-return-value-of-float-as-int-be-used-to- compare-float-in-cuda) */ // Thread index for accessing global memory int indx = blockIdx.x*blockDim.x + threadIdx.x; // Thread index for accessing shared memory made const for speedup const int tid = threadIdx.x; // Shared memory allocated dynamically from kernal call extern __shared__ float sdata[]; // Pull (real) data into shared memory // Memory is coalleced sdata[tid] = abs(out_data[indx].x); indx += gridDim.x*blockDim.x; while (indx < padded_length){ // if there are more data values than total threads, sdata[tid] = max(abs(out_data[indx].x), sdata[tid]); // use max to avoid if statement indx += gridDim.x*blockDim.x; } __syncthreads(); // make sure shared memory is ready for (int s= blockDim.x/2; s > 0; s>>=1){ /* This implementation uses the tecnique of sequential addressing. Each thread is responsible for finding the max between the data at tid and tid+s. 
This approach allows us to avoid bank conflicts since the stride is always 1 */ if(tid < s){ // s = 16, 8, 4, 2, 1 // here each thread finds the max between the data at tid and // and address on the "other side", a distance of s away. sdata[tid] = max(sdata[tid+s],sdata[tid]); } __syncthreads(); } // In emperical tests, unrolling the loop did NOT result in speed gains // if (tid < 16){ // sdata[tid] = max(sdata[tid+16],sdata[tid]); // sdata[tid] = max(sdata[tid+8],sdata[tid]); // sdata[tid] = max(sdata[tid+4],sdata[tid]); // sdata[tid] = max(sdata[tid+2],sdata[tid]); // sdata[tid] = max(sdata[tid+1],sdata[tid]); // } // __syncthreads(); // atomicMax is used by each thread to compare the value of the data point // at the first index (i.e. the max for the particular warp) with the current // maximum value in global memory. if (tid == 0) atomicMax(max_abs_val, sdata[0]); } __global__ void cudaDivideKernel(cufftComplex *out_data, float *max_abs_val, int padded_length) { /* TODO 2: Implement the division kernel. Divide all data by the value pointed to by max_abs_val. This kernel should be quite short. */ int tid = blockDim.x*blockIdx.x + threadIdx.x; while (tid < padded_length){ out_data[tid].x /= *max_abs_val; out_data[tid].y /= *max_abs_val; tid += gridDim.x*blockDim.x; } } void cudaCallProdScaleKernel(const unsigned int blocks, const unsigned int threadsPerBlock, const cufftComplex *raw_data, const cufftComplex *impulse_v, cufftComplex *out_data, const unsigned int padded_length) { /* TODO: Call the element-wise product and scaling kernel. */ cudaProdScaleKernel<<<blocks, threadsPerBlock>>>(raw_data, impulse_v, out_data, padded_length); } void cudaCallMaximumKernel(const unsigned int blocks, const unsigned int threadsPerBlock, cufftComplex *out_data, float *max_abs_val, const unsigned int padded_length) { /* TODO 2: Call the max-finding kernel. */ cudaMaximumKernel<<<blocks, threadsPerBlock, threadsPerBlock*sizeof(float)>>>(out_data, max_abs_val, padded_length); // Dynamic shared memory allocation? // Do we need anothe array for } void cudaCallDivideKernel(const unsigned int blocks, const unsigned int threadsPerBlock, cufftComplex *out_data, float *max_abs_val, const unsigned int padded_length) { /* TODO 2: Call the division kernel. */ cudaDivideKernel<<<blocks, threadsPerBlock>>>(out_data, max_abs_val, padded_length); }
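// ---------------------------------------------------------------------------
// Editor's sketch (not part of fft_convolve above). cudaMaximumKernel combines
// two ideas: a grid-stride pass that folds many elements into one shared-memory
// slot per thread, and a sequential-addressing tree reduction within the block.
// The standalone kernel below reuses that structure on a plain float array so
// it can be compiled and sanity-checked on its own. Unlike the kernel above,
// it writes one partial maximum per block and folds them on the host instead
// of using atomicMax; block_max_abs is the editor's name, and a power-of-two
// block size is assumed.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void block_max_abs(const float *in, float *block_max, int n)
{
    extern __shared__ float sdata[];
    const int tid = threadIdx.x;

    float local = 0.0f;                               // |x| >= 0, so 0 is a safe identity
    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += gridDim.x * blockDim.x)
        local = fmaxf(local, fabsf(in[i]));
    sdata[tid] = local;
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s >>= 1) {    // sequential addressing, as above
        if (tid < s)
            sdata[tid] = fmaxf(sdata[tid], sdata[tid + s]);
        __syncthreads();
    }
    if (tid == 0)
        block_max[blockIdx.x] = sdata[0];             // one partial maximum per block
}

int main()
{
    const int n = 1 << 20, threads = 256, blocks = 64;
    float *h = new float[n];
    for (int i = 0; i < n; ++i) h[i] = sinf(0.001f * i);
    h[12345] = -7.5f;                                 // planted maximum magnitude

    float *d_in, *d_partial;
    cudaMalloc((void **)&d_in, n * sizeof(float));
    cudaMalloc((void **)&d_partial, blocks * sizeof(float));
    cudaMemcpy(d_in, h, n * sizeof(float), cudaMemcpyHostToDevice);

    block_max_abs<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_partial, n);

    float partial[blocks];
    cudaMemcpy(partial, d_partial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    float m = 0.0f;
    for (int b = 0; b < blocks; ++b) m = fmaxf(m, partial[b]);   // final fold on the host
    printf("max |x| = %f (expected 7.500000)\n", m);

    cudaFree(d_in); cudaFree(d_partial); delete[] h;
    return 0;
}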
1f1e500620fe7794ff9955b45299b9db0287bbb1.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "efficient.h" #include <iostream> #define DEBUG 0 namespace StreamCompaction { namespace Efficient { const int threadCount = 32; #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } void printArray(int n, int * a) { printf("\n"); for(int i=0; i<n; ++i) printf("%d ", a[i]); printf("\n"); } __global__ void setK(int * k, int * data, int *bool_data, int index) { (*k) = data[index] + bool_data[index]; } __global__ void blockWiseScan(int n, int *odata, int *idata) { //Reference-> http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < n) { //Do block exclusive scans __shared__ int data[threadCount]; unsigned int t = threadIdx.x; n = blockDim.x; data[t] = idata[index]; int offset = 1; for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (t < d) { int ai = offset * ((t<<1)+1) - 1; int bi = offset * ((t<<1)+2) - 1; data[bi] += data[ai]; } offset <<= 1; } if (t == 0) { data[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d <<= 1) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (t < d) { int ai = offset * ((t<<1)+1) - 1; int bi = offset * ((t<<1)+2) - 1; float t = data[ai]; data[ai] = data[bi]; data[bi] += t; } } odata[index] = data[t]; } } __global__ void createTemp(int * odata, int *idata, int * temp, int numThreads) { int index = threadIdx.x + (blockIdx.x * blockDim.x); temp[index] = odata[(index+1) * numThreads - 1] + idata[(index+1) * numThreads - 1]; } __global__ void updateidata(int n, int *odata, int *temp_data, int numThreads) { int index = threadIdx.x + (blockIdx.x * blockDim.x); odata[index] += temp_data[(index / numThreads)]; } void exclusiveScan(int n, int *odata, int *idata, int numBlocks, int numThreads) { hipLaunchKernelGGL(( blockWiseScan), dim3(numBlocks), dim3(numThreads), 0, 0, n, odata, idata); checkCUDAError("BlockWiseScan1"); int *printData = new int[n]; // if(DEBUG) // { // std::cout<<"\nblockWiseScan"; // hipMemcpy(printData, odata, n * sizeof(int), hipMemcpyDeviceToHost); // printArray(n, printData); // } //Then we have to recurse and solve the odata array, So create a new array and solve. 
int *dev_temp, *dev_odata; int p = ilog2ceil(numBlocks); int fullN = pow(2, p); hipMalloc((void**)&dev_temp, fullN * sizeof(int)); hipMemset(dev_temp, 0, fullN * sizeof(int)); int newN = numBlocks; int newNumBlocks = (numBlocks + numThreads - 1) / numThreads; hipLaunchKernelGGL(( createTemp), dim3(newNumBlocks), dim3(numThreads), 0, 0, odata, idata, dev_temp, numThreads); checkCUDAError("createTemp"); // if(DEBUG) // { // std::cout<<"\ncreateTemp"; // hipMemcpy(printData, dev_temp, newN * sizeof(int), hipMemcpyDeviceToHost); // printArray(newN, printData); // } hipMalloc((void**)&dev_odata, fullN * sizeof(int)); if(numBlocks > numThreads) { exclusiveScan(newN, dev_odata, dev_temp, newNumBlocks, numThreads); } else { hipLaunchKernelGGL(( blockWiseScan), dim3(newNumBlocks), dim3(numThreads), 0, 0, newN, dev_odata, dev_temp); checkCUDAError("BlockWiseScan2"); } hipLaunchKernelGGL(( updateidata), dim3(numBlocks), dim3(numThreads), 0, 0, n, odata, dev_odata, numThreads); checkCUDAError("updateidata"); // if(DEBUG) // { // std::cout<<"\nupdate idata"; // hipMemcpy(printData, odata, n * sizeof(int), hipMemcpyDeviceToHost); // printArray(n, printData); // } hipFree(dev_temp); hipFree(dev_odata); delete(printData); } int compact(int n, RayState *idata) { RayState * hst_idata = new RayState[n]; hipMemcpy(hst_idata, idata, n * sizeof(RayState), hipMemcpyDeviceToHost); int i, count = 0; for(i=0; i<n; ++i) { if(hst_idata[i].isAlive) count++; } // if(DEBUG) // { // std::cout<<"Count Alive: "<<count<<std::endl; // } int oriN = n; int p = ilog2ceil(n); n = pow(2, p); int numThreads = threadCount, numBlocks = (n + numThreads - 1) / numThreads; RayState *dev_odata; int *dev_k = NULL, *dev_bool = NULL, *dev_temp = NULL; int *printData = new int[n]; hipMalloc((void**)&dev_k, sizeof(int)); hipMalloc((void**)&dev_bool, n * sizeof(int)); hipMalloc((void**)&dev_temp, n * sizeof(int)); hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean), dim3(numBlocks), dim3(numThreads), 0, 0, oriN, dev_bool, idata); checkCUDAError("kernMapToBool"); // if(DEBUG) // { // std::cout<<"\nBools : "; // hipMemcpy(printData, dev_bool, n * sizeof(int), hipMemcpyDeviceToHost); // printArray(n, printData); // } exclusiveScan(n, dev_temp, dev_bool, numBlocks, numThreads); checkCUDAError("Exclusive Scan"); hipLaunchKernelGGL(( setK), dim3(1),dim3(1), 0, 0, dev_k, dev_temp, dev_bool, n-1); int *k = new int; hipMemcpy(k, dev_k, sizeof(int), hipMemcpyDeviceToHost); hipMalloc((void**)&dev_odata, (*k) * sizeof(RayState)); hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(numBlocks), dim3(numThreads), 0, 0, n, dev_odata, idata, dev_bool, dev_temp); checkCUDAError("kernScatter"); hipMemcpy(idata, dev_odata, (*k) * sizeof(RayState), hipMemcpyDeviceToDevice); // if(DEBUG) // { // std::cout<<"K :"<<*k<<std::endl; // } hipFree(dev_bool); hipFree(dev_k); hipFree(dev_temp); hipFree(dev_odata); delete(printData); return (*k); } } } namespace StreamCompaction { namespace Common { __global__ void kernMapToBoolean(int n, int *bools, const RayState *idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < n) { bools[index] = (idata[index].isAlive) ? 1 : 0; } else { bools[index] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. 
*/ __global__ void kernScatter(int n, RayState *odata, const RayState *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < n) { if(bools[index] == 1) { int i = indices[index]; odata[i].isAlive = idata[index].isAlive; odata[i].pixelIndex = idata[index].pixelIndex; odata[i].rayColor = idata[index].rayColor; odata[i].ray.direction = idata[index].ray.direction; odata[i].ray.origin = idata[index].ray.origin; } } } } }
1f1e500620fe7794ff9955b45299b9db0287bbb1.cu
#include <cuda.h> #include <cuda_runtime.h> #include "efficient.h" #include <iostream> #define DEBUG 0 namespace StreamCompaction { namespace Efficient { const int threadCount = 32; #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } void printArray(int n, int * a) { printf("\n"); for(int i=0; i<n; ++i) printf("%d ", a[i]); printf("\n"); } __global__ void setK(int * k, int * data, int *bool_data, int index) { (*k) = data[index] + bool_data[index]; } __global__ void blockWiseScan(int n, int *odata, int *idata) { //Reference-> http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < n) { //Do block exclusive scans __shared__ int data[threadCount]; unsigned int t = threadIdx.x; n = blockDim.x; data[t] = idata[index]; int offset = 1; for (int d = n>>1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (t < d) { int ai = offset * ((t<<1)+1) - 1; int bi = offset * ((t<<1)+2) - 1; data[bi] += data[ai]; } offset <<= 1; } if (t == 0) { data[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d <<= 1) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (t < d) { int ai = offset * ((t<<1)+1) - 1; int bi = offset * ((t<<1)+2) - 1; float t = data[ai]; data[ai] = data[bi]; data[bi] += t; } } odata[index] = data[t]; } } __global__ void createTemp(int * odata, int *idata, int * temp, int numThreads) { int index = threadIdx.x + (blockIdx.x * blockDim.x); temp[index] = odata[(index+1) * numThreads - 1] + idata[(index+1) * numThreads - 1]; } __global__ void updateidata(int n, int *odata, int *temp_data, int numThreads) { int index = threadIdx.x + (blockIdx.x * blockDim.x); odata[index] += temp_data[(index / numThreads)]; } void exclusiveScan(int n, int *odata, int *idata, int numBlocks, int numThreads) { blockWiseScan<<<numBlocks, numThreads>>>(n, odata, idata); checkCUDAError("BlockWiseScan1"); int *printData = new int[n]; // if(DEBUG) // { // std::cout<<"\nblockWiseScan"; // cudaMemcpy(printData, odata, n * sizeof(int), cudaMemcpyDeviceToHost); // printArray(n, printData); // } //Then we have to recurse and solve the odata array, So create a new array and solve. 
int *dev_temp, *dev_odata; int p = ilog2ceil(numBlocks); int fullN = pow(2, p); cudaMalloc((void**)&dev_temp, fullN * sizeof(int)); cudaMemset(dev_temp, 0, fullN * sizeof(int)); int newN = numBlocks; int newNumBlocks = (numBlocks + numThreads - 1) / numThreads; createTemp<<<newNumBlocks, numThreads>>>(odata, idata, dev_temp, numThreads); checkCUDAError("createTemp"); // if(DEBUG) // { // std::cout<<"\ncreateTemp"; // cudaMemcpy(printData, dev_temp, newN * sizeof(int), cudaMemcpyDeviceToHost); // printArray(newN, printData); // } cudaMalloc((void**)&dev_odata, fullN * sizeof(int)); if(numBlocks > numThreads) { exclusiveScan(newN, dev_odata, dev_temp, newNumBlocks, numThreads); } else { blockWiseScan<<<newNumBlocks, numThreads>>>(newN, dev_odata, dev_temp); checkCUDAError("BlockWiseScan2"); } updateidata<<<numBlocks, numThreads>>>(n, odata, dev_odata, numThreads); checkCUDAError("updateidata"); // if(DEBUG) // { // std::cout<<"\nupdate idata"; // cudaMemcpy(printData, odata, n * sizeof(int), cudaMemcpyDeviceToHost); // printArray(n, printData); // } cudaFree(dev_temp); cudaFree(dev_odata); delete(printData); } int compact(int n, RayState *idata) { RayState * hst_idata = new RayState[n]; cudaMemcpy(hst_idata, idata, n * sizeof(RayState), cudaMemcpyDeviceToHost); int i, count = 0; for(i=0; i<n; ++i) { if(hst_idata[i].isAlive) count++; } // if(DEBUG) // { // std::cout<<"Count Alive: "<<count<<std::endl; // } int oriN = n; int p = ilog2ceil(n); n = pow(2, p); int numThreads = threadCount, numBlocks = (n + numThreads - 1) / numThreads; RayState *dev_odata; int *dev_k = NULL, *dev_bool = NULL, *dev_temp = NULL; int *printData = new int[n]; cudaMalloc((void**)&dev_k, sizeof(int)); cudaMalloc((void**)&dev_bool, n * sizeof(int)); cudaMalloc((void**)&dev_temp, n * sizeof(int)); StreamCompaction::Common::kernMapToBoolean<<<numBlocks, numThreads>>>(oriN, dev_bool, idata); checkCUDAError("kernMapToBool"); // if(DEBUG) // { // std::cout<<"\nBools : "; // cudaMemcpy(printData, dev_bool, n * sizeof(int), cudaMemcpyDeviceToHost); // printArray(n, printData); // } exclusiveScan(n, dev_temp, dev_bool, numBlocks, numThreads); checkCUDAError("Exclusive Scan"); setK<<<1,1>>>(dev_k, dev_temp, dev_bool, n-1); int *k = new int; cudaMemcpy(k, dev_k, sizeof(int), cudaMemcpyDeviceToHost); cudaMalloc((void**)&dev_odata, (*k) * sizeof(RayState)); StreamCompaction::Common::kernScatter<<<numBlocks, numThreads>>>(n, dev_odata, idata, dev_bool, dev_temp); checkCUDAError("kernScatter"); cudaMemcpy(idata, dev_odata, (*k) * sizeof(RayState), cudaMemcpyDeviceToDevice); // if(DEBUG) // { // std::cout<<"K :"<<*k<<std::endl; // } cudaFree(dev_bool); cudaFree(dev_k); cudaFree(dev_temp); cudaFree(dev_odata); delete(printData); return (*k); } } } namespace StreamCompaction { namespace Common { __global__ void kernMapToBoolean(int n, int *bools, const RayState *idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < n) { bools[index] = (idata[index].isAlive) ? 1 : 0; } else { bools[index] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. 
*/ __global__ void kernScatter(int n, RayState *odata, const RayState *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < n) { if(bools[index] == 1) { int i = indices[index]; odata[i].isAlive = idata[index].isAlive; odata[i].pixelIndex = idata[index].pixelIndex; odata[i].rayColor = idata[index].rayColor; odata[i].ray.direction = idata[index].ray.direction; odata[i].ray.origin = idata[index].ray.origin; } } } } }
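// ---------------------------------------------------------------------------
// Editor's sketch (not part of StreamCompaction above). compact() follows the
// classic map / exclusive-scan / scatter recipe: kernMapToBoolean marks alive
// rays, exclusiveScan turns the flags into output indices, setK reads
// indices[n-1] + bools[n-1] to obtain the surviving count, and kernScatter
// moves the survivors. The host-only reference below runs the same recipe on
// a toy flag array; exclusive_scan and the variable names are the editor's.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

static std::vector<int> exclusive_scan(const std::vector<int> &in)
{
    std::vector<int> out(in.size(), 0);
    for (std::size_t i = 1; i < in.size(); ++i)
        out[i] = out[i - 1] + in[i - 1];
    return out;
}

int main()
{
    // 1 marks an "alive" element, mirroring kernMapToBoolean on RayState::isAlive.
    std::vector<int> bools   = {1, 0, 0, 1, 1, 0, 1, 0};
    std::vector<int> indices = exclusive_scan(bools);

    // Same quantity the setK kernel computes on the device.
    int k = indices.back() + bools.back();

    std::vector<int> compacted(k);
    for (std::size_t i = 0; i < bools.size(); ++i)
        if (bools[i]) compacted[indices[i]] = (int)i;   // kernScatter analogue

    printf("k = %d, surviving input positions:", k);
    for (int v : compacted) printf(" %d", v);
    printf("\n");
    return 0;
}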
4c951f83edf2e72e17cccaf1749407fd4548eae1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> __global__ void evenReduce(int *a, int *b, int numP) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < numP) b[index] = a[index * 2] + a[index * 2 + 1]; } __global__ void oddReduce(int *a, int *b, int numP) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < numP) { if(index != numP - 1) { b[index] = a[index * 2] + a[index * 2 + 1]; } else { /*puts the remaining value that doesn't have a pair in the right index*/ b[index] = a[index * 2]; } } } #define LENGTH 512 #define BLOCK_THREADS 512 int main() { double length = LENGTH; int numP, l; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /*create arrays for host and GPU*/ int *a, *b, *k_b, *k_a; int size = length * sizeof( int ); a = (int *)malloc( size ); b = (int *)malloc( size ); hipMalloc( (void **) &k_a, size ); hipMalloc( (void **) &k_b, size ); /*initialize the array*/ for( int i = 0; i < length; i++ ) { a[i] = i; b[i] = 0; } /*array debug*/ // printf("A:\n"); // for(int i=0; i< length; i++) // { // printf("%d ", a[i]); // } /* copy inputs to device */ hipMemcpy(k_a, a, size, hipMemcpyHostToDevice ); hipMemcpy(k_b, b, size, hipMemcpyHostToDevice ); dim3 dimGrid( 1, 1 ); dim3 dimBlock(BLOCK_THREADS, 1); /*Since each thread does 2 additions there are log2(N) iterations.*/ int gates = ceil(log(length) / log(2)); hipEventRecord(start); for(int i=0; i < gates; i++) { /*get the number of threads needed. Ceiling used for odd array lengths*/ numP = ceil(length/2); l = (int)length; /*when threads are divisble by 2 use less code...*/ if( l % 2 == 0) hipLaunchKernelGGL(( evenReduce), dim3(dimGrid),dim3(dimBlock), 0, 0, k_a, k_b, numP); else hipLaunchKernelGGL(( oddReduce), dim3(dimGrid),dim3(dimBlock), 0, 0, k_a, k_b, numP); /*last # of threads will equal next array length to compute*/ length = numP; // printf("\niteration %d reduction is\n", i + 1); // hipMemcpy(b, k_b, size, hipMemcpyDeviceToHost ); // for(int i=0; i< N; i++) // { // printf("%d ", b[i]); // } /*send array b's data back to a after each iteration*/ hipMemcpy(k_a, k_b, size, hipMemcpyDeviceToDevice ); } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); /* copy result back to host*/ hipMemcpy(b, k_b, size, hipMemcpyDeviceToHost ); printf("\nFinal reduction is %d\n", b[0]); printf("\nThis took %f milliseconds\n", milliseconds); /* clean up */ free(a); free(b); hipFree( k_a ); hipFree( k_b ); return 0; }
4c951f83edf2e72e17cccaf1749407fd4548eae1.cu
#include <stdio.h> #include <math.h> __global__ void evenReduce(int *a, int *b, int numP) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < numP) b[index] = a[index * 2] + a[index * 2 + 1]; } __global__ void oddReduce(int *a, int *b, int numP) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < numP) { if(index != numP - 1) { b[index] = a[index * 2] + a[index * 2 + 1]; } else { /*puts the remaining value that doesn't have a pair in the right index*/ b[index] = a[index * 2]; } } } #define LENGTH 512 #define BLOCK_THREADS 512 int main() { double length = LENGTH; int numP, l; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /*create arrays for host and GPU*/ int *a, *b, *k_b, *k_a; int size = length * sizeof( int ); a = (int *)malloc( size ); b = (int *)malloc( size ); cudaMalloc( (void **) &k_a, size ); cudaMalloc( (void **) &k_b, size ); /*initialize the array*/ for( int i = 0; i < length; i++ ) { a[i] = i; b[i] = 0; } /*array debug*/ // printf("A:\n"); // for(int i=0; i< length; i++) // { // printf("%d ", a[i]); // } /* copy inputs to device */ cudaMemcpy(k_a, a, size, cudaMemcpyHostToDevice ); cudaMemcpy(k_b, b, size, cudaMemcpyHostToDevice ); dim3 dimGrid( 1, 1 ); dim3 dimBlock(BLOCK_THREADS, 1); /*Since each thread does 2 additions there are log2(N) iterations.*/ int gates = ceil(log(length) / log(2)); cudaEventRecord(start); for(int i=0; i < gates; i++) { /*get the number of threads needed. Ceiling used for odd array lengths*/ numP = ceil(length/2); l = (int)length; /*when threads are divisble by 2 use less code...*/ if( l % 2 == 0) evenReduce<<<dimGrid,dimBlock>>>(k_a, k_b, numP); else oddReduce<<<dimGrid,dimBlock>>>(k_a, k_b, numP); /*last # of threads will equal next array length to compute*/ length = numP; // printf("\niteration %d reduction is\n", i + 1); // cudaMemcpy(b, k_b, size, cudaMemcpyDeviceToHost ); // for(int i=0; i< N; i++) // { // printf("%d ", b[i]); // } /*send array b's data back to a after each iteration*/ cudaMemcpy(k_a, k_b, size, cudaMemcpyDeviceToDevice ); } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); /* copy result back to host*/ cudaMemcpy(b, k_b, size, cudaMemcpyDeviceToHost ); printf("\nFinal reduction is %d\n", b[0]); printf("\nThis took %f milliseconds\n", milliseconds); /* clean up */ free(a); free(b); cudaFree( k_a ); cudaFree( k_b ); return 0; }
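// ---------------------------------------------------------------------------
// Editor's sketch (not part of the reduction pair above). Each pass of the
// device code halves the element count (rounding up), using oddReduce to carry
// an unpaired last element through unchanged, and runs ceil(log2(N)) passes in
// total (9 for LENGTH = 512). The host model below mirrors that control flow
// so the expected result can be checked without a GPU; pairwise_reduce is the
// editor's name.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

static int pairwise_reduce(std::vector<int> a)
{
    while (a.size() > 1) {
        std::vector<int> b((a.size() + 1) / 2);       // ceil(length / 2), as in the loop above
        for (std::size_t i = 0; i + 1 < a.size(); i += 2)
            b[i / 2] = a[i] + a[i + 1];
        if (a.size() % 2 != 0)
            b.back() = a.back();                      // the oddReduce carry-through
        a.swap(b);
    }
    return a[0];
}

int main()
{
    std::vector<int> v(512);
    for (int i = 0; i < 512; ++i) v[i] = i;           // same initialisation as the device code
    printf("sum = %d (expected %d)\n", pairwise_reduce(v), 511 * 512 / 2);
    return 0;
}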
8e855a362691bc4b5673ee1cb552c69b0140734e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void helloFromGPU(void)
{
    printf("Hello World from GPU!\n");
    printf("%d", threadIdx.x);
}

int main(void)
{
    printf("Hello World from CPU!\n");

    hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10), 0, 0, );
    hipDeviceReset();
}
8e855a362691bc4b5673ee1cb552c69b0140734e.cu
#include <stdio.h>

__global__ void helloFromGPU(void)
{
    printf("Hello World from GPU!\n");
    printf("%d", threadIdx.x);
}

int main(void)
{
    printf("Hello World from CPU!\n");

    helloFromGPU <<<1, 10>>>();
    cudaDeviceReset();
}
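The pair above illustrates the launch-syntax rewrite hipify performs: helloFromGPU <<<1, 10>>>() becomes a hipLaunchKernelGGL call with explicit dim3 arguments. Neither version checks the launch result, and device-side printf output is only flushed at synchronization points (the context teardown in cudaDeviceReset qualifies, but an explicit sync plus error checks makes the behaviour easier to reason about). A hedged sketch of a hardened CUDA variant, with the same kernel and added checks that are not part of the original sample:

    #include <stdio.h>

    __global__ void helloFromGPU(void)
    {
        printf("Hello World from GPU!\n");
        printf("%d", threadIdx.x);
    }

    int main(void)
    {
        printf("Hello World from CPU!\n");

        helloFromGPU<<<1, 10>>>();

        // Catch configuration errors reported by the launch itself.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
            return 1;
        }

        // Wait for the kernel and flush its printf buffer before exiting.
        err = cudaDeviceSynchronize();
        if (err != cudaSuccess) {
            fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
            return 1;
        }

        cudaDeviceReset();
        return 0;
    }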
1e9355923e6cdc53f51d5174cf8c6b11e47bf4e5.hip
// !!! This is a file automatically generated by hipify!!! #include "latms.hpp" #include <cutf/memory.hpp> #include <cutf/type.hpp> #include <cutf/cublas.hpp> #include <cutf/cusolver.hpp> #include <cutf/hiprand.hpp> template <class T> void mtk::utils::latms( T* const mat_ptr, const std::size_t m, const std::size_t n, const std::size_t rank, const T* const s_array, const unsigned long long seed ) { auto cublas_handle = cutf::cublas::get_cublas_unique_ptr(); auto hs_ptr = cutf::memory::get_host_unique_ptr<T>(rank * rank); auto s_ptr = cutf::memory::get_device_unique_ptr<T>(rank * rank); auto u_ptr = cutf::memory::get_device_unique_ptr<T>(rank * m); auto v_ptr = cutf::memory::get_device_unique_ptr<T>(rank * n); auto tmp_ptr = cutf::memory::get_device_unique_ptr<T>(rank * m); auto cugen = cutf::hiprand::get_curand_unique_ptr(HIPRAND_RNG_PSEUDO_MT19937); hiprandSetPseudoRandomGeneratorSeed(*cugen.get(), seed); CUTF_CHECK_ERROR(cutf::hiprand::generate_normal(*cugen.get(), u_ptr.get(), m * rank, 0.0f, 1.0f)); CUTF_CHECK_ERROR(cutf::hiprand::generate_normal(*cugen.get(), v_ptr.get(), n * rank, 0.0f, 1.0f)); auto d_tau = cutf::memory::get_device_unique_ptr<T>(n * n); auto h_a = cutf::memory::get_device_unique_ptr<T>(m * n); auto cusolver = cutf::cusolver::get_cusolver_dn_unique_ptr(); // working memory int u_geqrf_working_memory_size, u_gqr_working_memory_size; CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf_buffer_size( *cusolver.get(), m, rank, u_ptr.get(), m, &u_geqrf_working_memory_size )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr_buffer_size( *cusolver.get(), m, rank, rank, u_ptr.get(), m, d_tau.get(), &u_gqr_working_memory_size )); int v_geqrf_working_memory_size, v_gqr_working_memory_size; CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf_buffer_size( *cusolver.get(), n, rank, v_ptr.get(), n, &v_geqrf_working_memory_size )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr_buffer_size( *cusolver.get(), n, rank, rank, v_ptr.get(), n, d_tau.get(), &v_gqr_working_memory_size )); int geqrf_working_memory_size = ::max(v_geqrf_working_memory_size, u_geqrf_working_memory_size); int gqr_working_memory_size = ::max(v_gqr_working_memory_size, u_gqr_working_memory_size); auto d_geqrf_working_memory = cutf::memory::get_device_unique_ptr<T>(geqrf_working_memory_size); auto d_gqr_working_memory = cutf::memory::get_device_unique_ptr<T>(gqr_working_memory_size); auto d_info = cutf::memory::get_device_unique_ptr<int>(1); CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf( *cusolver.get(), m, rank, u_ptr.get(), m, d_tau.get(), d_geqrf_working_memory.get(), geqrf_working_memory_size, d_info.get() )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr( *cusolver.get(), m, rank, rank, u_ptr.get(), m, d_tau.get(), d_gqr_working_memory.get(), gqr_working_memory_size, d_info.get() )); CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf( *cusolver.get(), n, rank, v_ptr.get(), n, d_tau.get(), d_geqrf_working_memory.get(), geqrf_working_memory_size, d_info.get() )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr( *cusolver.get(), n, rank, rank, v_ptr.get(), n, d_tau.get(), d_gqr_working_memory.get(), gqr_working_memory_size, d_info.get() )); for (std::size_t i = 0; i < rank * rank; i++) { if (i % (rank + 1) == 0) { hs_ptr.get()[i] = s_array[i / (rank + 1)]; } else { hs_ptr.get()[i] = cutf::type::cast<T>(0.0f); } } cutf::memory::copy(s_ptr.get(), hs_ptr.get(), rank * rank); // merge const T one = cutf::type::cast<T>(1.0f); const T zero = cutf::type::cast<T>(0.0f); cutf::cublas::gemm( *cublas_handle.get(), HIPBLAS_OP_N, HIPBLAS_OP_N, m, rank, rank, &one, u_ptr.get(), m, 
s_ptr.get(), rank, &zero, tmp_ptr.get(), m ); cutf::cublas::gemm( *cublas_handle.get(), HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, rank, &one, tmp_ptr.get(), m, v_ptr.get(), n, &zero, mat_ptr, m ); } template void mtk::utils::latms<float >(float * const, const std::size_t, const std::size_t, const std::size_t, const float * const, const unsigned long long); template void mtk::utils::latms<double>(double* const, const std::size_t, const std::size_t, const std::size_t, const double* const, const unsigned long long); template <class T> T mtk::utils::get_cond( T* const mat, const std::size_t m, const std::size_t n ) { const auto rank = ::min(m, n); auto cusolver = cutf::cusolver::get_cusolver_dn_unique_ptr(); auto dVT = cutf::memory::get_device_unique_ptr<T>(rank * n); auto dS = cutf::memory::get_device_unique_ptr<T>(rank); auto dU = cutf::memory::get_device_unique_ptr<T>(m * rank); auto dInfo = cutf::memory::get_device_unique_ptr<int>(1); int svd_w_size; CUTF_CHECK_ERROR( cutf::cusolver::dn::gesvd_buffer_size<T>( *cusolver.get(), m, n, &svd_w_size ) ); auto dwsvd = cutf::memory::get_device_unique_ptr<T>(svd_w_size); auto dwrsvd = cutf::memory::get_device_unique_ptr<T>(::min(m, n) - 1); CUTF_CHECK_ERROR( cutf::cusolver::dn::gesvd( *cusolver.get(), 'S', 'S', m, n, mat, m, dS.get(), dU.get(), m, dVT.get(), rank, dwsvd.get(), svd_w_size, dwrsvd.get(), dInfo.get() ) ); auto hS = cutf::memory::get_host_unique_ptr<T>(rank); cutf::memory::copy(hS.get(), dS.get(), rank); return hS.get()[0] / hS.get()[rank - 1]; } template float mtk::utils::get_cond(float* const mat, const std::size_t m, const std::size_t n); template double mtk::utils::get_cond(double* const mat, const std::size_t m, const std::size_t n);
1e9355923e6cdc53f51d5174cf8c6b11e47bf4e5.cu
#include "latms.hpp" #include <cutf/memory.hpp> #include <cutf/type.hpp> #include <cutf/cublas.hpp> #include <cutf/cusolver.hpp> #include <cutf/curand.hpp> template <class T> void mtk::utils::latms( T* const mat_ptr, const std::size_t m, const std::size_t n, const std::size_t rank, const T* const s_array, const unsigned long long seed ) { auto cublas_handle = cutf::cublas::get_cublas_unique_ptr(); auto hs_ptr = cutf::memory::get_host_unique_ptr<T>(rank * rank); auto s_ptr = cutf::memory::get_device_unique_ptr<T>(rank * rank); auto u_ptr = cutf::memory::get_device_unique_ptr<T>(rank * m); auto v_ptr = cutf::memory::get_device_unique_ptr<T>(rank * n); auto tmp_ptr = cutf::memory::get_device_unique_ptr<T>(rank * m); auto cugen = cutf::curand::get_curand_unique_ptr(CURAND_RNG_PSEUDO_MT19937); curandSetPseudoRandomGeneratorSeed(*cugen.get(), seed); CUTF_CHECK_ERROR(cutf::curand::generate_normal(*cugen.get(), u_ptr.get(), m * rank, 0.0f, 1.0f)); CUTF_CHECK_ERROR(cutf::curand::generate_normal(*cugen.get(), v_ptr.get(), n * rank, 0.0f, 1.0f)); auto d_tau = cutf::memory::get_device_unique_ptr<T>(n * n); auto h_a = cutf::memory::get_device_unique_ptr<T>(m * n); auto cusolver = cutf::cusolver::get_cusolver_dn_unique_ptr(); // working memory int u_geqrf_working_memory_size, u_gqr_working_memory_size; CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf_buffer_size( *cusolver.get(), m, rank, u_ptr.get(), m, &u_geqrf_working_memory_size )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr_buffer_size( *cusolver.get(), m, rank, rank, u_ptr.get(), m, d_tau.get(), &u_gqr_working_memory_size )); int v_geqrf_working_memory_size, v_gqr_working_memory_size; CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf_buffer_size( *cusolver.get(), n, rank, v_ptr.get(), n, &v_geqrf_working_memory_size )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr_buffer_size( *cusolver.get(), n, rank, rank, v_ptr.get(), n, d_tau.get(), &v_gqr_working_memory_size )); int geqrf_working_memory_size = std::max(v_geqrf_working_memory_size, u_geqrf_working_memory_size); int gqr_working_memory_size = std::max(v_gqr_working_memory_size, u_gqr_working_memory_size); auto d_geqrf_working_memory = cutf::memory::get_device_unique_ptr<T>(geqrf_working_memory_size); auto d_gqr_working_memory = cutf::memory::get_device_unique_ptr<T>(gqr_working_memory_size); auto d_info = cutf::memory::get_device_unique_ptr<int>(1); CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf( *cusolver.get(), m, rank, u_ptr.get(), m, d_tau.get(), d_geqrf_working_memory.get(), geqrf_working_memory_size, d_info.get() )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr( *cusolver.get(), m, rank, rank, u_ptr.get(), m, d_tau.get(), d_gqr_working_memory.get(), gqr_working_memory_size, d_info.get() )); CUTF_CHECK_ERROR(cutf::cusolver::dn::geqrf( *cusolver.get(), n, rank, v_ptr.get(), n, d_tau.get(), d_geqrf_working_memory.get(), geqrf_working_memory_size, d_info.get() )); CUTF_CHECK_ERROR(cutf::cusolver::dn::gqr( *cusolver.get(), n, rank, rank, v_ptr.get(), n, d_tau.get(), d_gqr_working_memory.get(), gqr_working_memory_size, d_info.get() )); for (std::size_t i = 0; i < rank * rank; i++) { if (i % (rank + 1) == 0) { hs_ptr.get()[i] = s_array[i / (rank + 1)]; } else { hs_ptr.get()[i] = cutf::type::cast<T>(0.0f); } } cutf::memory::copy(s_ptr.get(), hs_ptr.get(), rank * rank); // merge const T one = cutf::type::cast<T>(1.0f); const T zero = cutf::type::cast<T>(0.0f); cutf::cublas::gemm( *cublas_handle.get(), CUBLAS_OP_N, CUBLAS_OP_N, m, rank, rank, &one, u_ptr.get(), m, s_ptr.get(), rank, &zero, tmp_ptr.get(), m ); 
cutf::cublas::gemm( *cublas_handle.get(), CUBLAS_OP_N, CUBLAS_OP_T, m, n, rank, &one, tmp_ptr.get(), m, v_ptr.get(), n, &zero, mat_ptr, m ); } template void mtk::utils::latms<float >(float * const, const std::size_t, const std::size_t, const std::size_t, const float * const, const unsigned long long); template void mtk::utils::latms<double>(double* const, const std::size_t, const std::size_t, const std::size_t, const double* const, const unsigned long long); template <class T> T mtk::utils::get_cond( T* const mat, const std::size_t m, const std::size_t n ) { const auto rank = std::min(m, n); auto cusolver = cutf::cusolver::get_cusolver_dn_unique_ptr(); auto dVT = cutf::memory::get_device_unique_ptr<T>(rank * n); auto dS = cutf::memory::get_device_unique_ptr<T>(rank); auto dU = cutf::memory::get_device_unique_ptr<T>(m * rank); auto dInfo = cutf::memory::get_device_unique_ptr<int>(1); int svd_w_size; CUTF_CHECK_ERROR( cutf::cusolver::dn::gesvd_buffer_size<T>( *cusolver.get(), m, n, &svd_w_size ) ); auto dwsvd = cutf::memory::get_device_unique_ptr<T>(svd_w_size); auto dwrsvd = cutf::memory::get_device_unique_ptr<T>(std::min(m, n) - 1); CUTF_CHECK_ERROR( cutf::cusolver::dn::gesvd( *cusolver.get(), 'S', 'S', m, n, mat, m, dS.get(), dU.get(), m, dVT.get(), rank, dwsvd.get(), svd_w_size, dwrsvd.get(), dInfo.get() ) ); auto hS = cutf::memory::get_host_unique_ptr<T>(rank); cutf::memory::copy(hS.get(), dS.get(), rank); return hS.get()[0] / hS.get()[rank - 1]; } template float mtk::utils::get_cond(float* const mat, const std::size_t m, const std::size_t n); template double mtk::utils::get_cond(double* const mat, const std::size_t m, const std::size_t n);
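In the latms pair above, the two geqrf/gqr sequences orthonormalize Gaussian random matrices, the two gemm calls assemble the test matrix from the singular values supplied in s_array, and get_cond reports the ratio of the extreme singular values returned by gesvd. In formula form, as a restatement of what those calls compute using standard notation not present in the source:

    A = U \,\mathrm{diag}(s_1, \dots, s_r)\, V^{\mathsf T},
    \qquad U \in \mathbb{R}^{m \times r},\; V \in \mathbb{R}^{n \times r}
    \text{ with orthonormal columns,}

    \kappa_2(A) = \frac{\sigma_{\max}(A)}{\sigma_{\min}(A)}
                = \frac{\texttt{hS}[0]}{\texttt{hS}[\mathrm{rank}-1]} .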
3ad021a2b0207bd93ae28f7753827bc3942b9be5.hip
// !!! This is a file automatically generated by hipify!!!
// Source: http://www.cs.fsu.edu/
#include <stdio.h>

// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
    printf("Major revision number:         %d\n", devProp.major);
    printf("Minor revision number:         %d\n", devProp.minor);
    printf("Name:                          %s\n", devProp.name);
    printf("Total global memory:           %u\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
    printf("Total registers per block:     %d\n", devProp.regsPerBlock);
    printf("Warp size:                     %d\n", devProp.warpSize);
    printf("Maximum memory pitch:          %u\n", devProp.memPitch);
    printf("Maximum threads per block:     %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid:  %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate:                    %d\n", devProp.clockRate);
    printf("Total constant memory:         %u\n", devProp.totalConstMem);
    printf("Texture alignment:             %u\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors:     %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout:      %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    return;
}

int main()
{
    // Number of CUDA devices
    int devCount;
    hipGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    // Iterate through devices
    for (int i = 0; i < devCount; ++i)
    {
        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        hipDeviceProp_t devProp;
        hipGetDeviceProperties(&devProp, i);
        printDevProp(devProp);
    }

    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);

    return 0;
}
3ad021a2b0207bd93ae28f7753827bc3942b9be5.cu
// Source: http://www.cs.fsu.edu/
#include <stdio.h>

// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
    printf("Major revision number:         %d\n", devProp.major);
    printf("Minor revision number:         %d\n", devProp.minor);
    printf("Name:                          %s\n", devProp.name);
    printf("Total global memory:           %u\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
    printf("Total registers per block:     %d\n", devProp.regsPerBlock);
    printf("Warp size:                     %d\n", devProp.warpSize);
    printf("Maximum memory pitch:          %u\n", devProp.memPitch);
    printf("Maximum threads per block:     %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid:  %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate:                    %d\n", devProp.clockRate);
    printf("Total constant memory:         %u\n", devProp.totalConstMem);
    printf("Texture alignment:             %u\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors:     %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout:      %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    return;
}

int main()
{
    // Number of CUDA devices
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    // Iterate through devices
    for (int i = 0; i < devCount; ++i)
    {
        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printDevProp(devProp);
    }

    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);

    return 0;
}
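The query sample above discards the error codes returned by the runtime calls and prints several size_t fields (totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem, textureAlignment) with %u. A hedged sketch of the usual checked pattern, using %zu for the size_t field it prints; the CHECK macro name is illustrative and not taken from the original:

    #include <stdio.h>
    #include <stdlib.h>
    #include <cuda_runtime.h>

    // Wrap any call returning cudaError_t and abort with a readable message on failure.
    #define CHECK(call)                                                   \
        do {                                                              \
            cudaError_t err_ = (call);                                    \
            if (err_ != cudaSuccess) {                                    \
                fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,        \
                        cudaGetErrorString(err_));                        \
                exit(EXIT_FAILURE);                                       \
            }                                                             \
        } while (0)

    int main(void)
    {
        int devCount = 0;
        CHECK(cudaGetDeviceCount(&devCount));

        for (int i = 0; i < devCount; ++i) {
            cudaDeviceProp prop;
            CHECK(cudaGetDeviceProperties(&prop, i));
            printf("Device %d: %s, %zu bytes global memory, compute %d.%d\n",
                   i, prop.name, prop.totalGlobalMem, prop.major, prop.minor);
        }
        return 0;
    }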
e3f544710af3bcf3a28f472c847026aa6cc6a0a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void reduce(float *g_idata, float *g_odata, unsigned int n) { extern __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; float mySum = 0.0f; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) { mySum += g_idata[i + blockDim.x]; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; } }
e3f544710af3bcf3a28f472c847026aa6cc6a0a3.cu
extern "C" __global__ void reduce(float *g_idata, float *g_odata, unsigned int n) { extern __shared__ float sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; float mySum = 0.0f; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) { mySum += g_idata[i + blockDim.x]; } i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; } }
b9b66f5051ffebec7f4798b1779de31a77dd2fcb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from ztranspose.cu normal z -> s, Fri Jan 30 19:00:09 2015 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_s #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void stranspose_device( int m, int n, const float *A, int lda, float *AT, int ldat) { __shared__ float sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void stranspose_kernel( int m, int n, const float *A, int lda, float *AT, int ldat) { stranspose_device(m, n, A, lda, AT, ldat); } __global__ void stranspose_kernel_batched( int m, int n, float **dA_array, int lda, float **dAT_array, int ldat) { int batchid = blockIdx.z; stranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- stranspose_q copies and transposes a matrix dA to matrix dAT. Same as stranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA REAL array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT REAL array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_q( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB ); hipLaunchKernelGGL(( stranspose_kernel), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_stranspose_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dAT, magma_int_t lddat ) { magmablas_stranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- stranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as stranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array REAL* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array REAL* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_batched_q( magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB, batchCount ); hipLaunchKernelGGL(( stranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_stranspose_batched_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_batched( magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_stranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
b9b66f5051ffebec7f4798b1779de31a77dd2fcb.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from ztranspose.cu normal z -> s, Fri Jan 30 19:00:09 2015 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_s #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void stranspose_device( int m, int n, const float *A, int lda, float *AT, int ldat) { __shared__ float sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void stranspose_kernel( int m, int n, const float *A, int lda, float *AT, int ldat) { stranspose_device(m, n, A, lda, AT, ldat); } __global__ void stranspose_kernel_batched( int m, int n, float **dA_array, int lda, float **dAT_array, int ldat) { int batchid = blockIdx.z; stranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- stranspose_q copies and transposes a matrix dA to matrix dAT. Same as stranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA REAL array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT REAL array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_q( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB ); stranspose_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_stranspose_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose( magma_int_t m, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dAT, magma_int_t lddat ) { magmablas_stranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- stranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as stranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array REAL* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array REAL* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_batched_q( magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( (m+NB-1)/NB, (n+NB-1)/NB, batchCount ); stranspose_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_stranspose_batched_q @ingroup magma_saux2 ********************************************************************/ extern "C" void magmablas_stranspose_batched( magma_int_t m, magma_int_t n, float **dA_array, magma_int_t ldda, float **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_stranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
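One detail worth noting in the transpose kernels above is the shared tile declared as sA[NB][NX+1]: the extra column of padding staggers consecutive rows across shared-memory banks, so the column-wise reads during the write-back phase do not all hit the same bank. A stripped-down illustration of that idea follows; it is a generic 32x32 tiled transpose, not the MAGMA code, and it omits MAGMA's sub-tiling, queues, and batching.

    #define TILE 32

    // out (n x m, row-major) receives the transpose of in (m x n, row-major).
    // Launch with dim3 block(TILE, TILE) and dim3 grid((n+TILE-1)/TILE, (m+TILE-1)/TILE).
    __global__ void transposeTile(const float *in, float *out, int m, int n)
    {
        // TILE+1 plays the same role as NX+1 above: it avoids shared-memory bank conflicts.
        __shared__ float tile[TILE][TILE + 1];

        int x = blockIdx.x * TILE + threadIdx.x;   // column index into `in`
        int y = blockIdx.y * TILE + threadIdx.y;   // row index into `in`
        if (x < n && y < m)
            tile[threadIdx.y][threadIdx.x] = in[y * n + x];

        __syncthreads();

        int tx = blockIdx.y * TILE + threadIdx.x;  // column index into `out`
        int ty = blockIdx.x * TILE + threadIdx.y;  // row index into `out`
        if (tx < m && ty < n)
            out[ty * m + tx] = tile[threadIdx.x][threadIdx.y];
    }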
ab338b7abe16e2b36e91d6dea902da0b1191dc21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace arb { namespace allen_catalogue { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_v __attribute__((unused)) = params_.state_vars[1];\ auto* _pp_var_g __attribute__((unused)) = params_.state_vars[2];\ auto* _pp_var_celsius __attribute__((unused)) = params_.state_vars[3];\ auto* _pp_var_mInf __attribute__((unused)) = params_.state_vars[4];\ auto* _pp_var_mTau __attribute__((unused)) = params_.state_vars[5];\ auto* _pp_var_mAlpha __attribute__((unused)) = params_.state_vars[6];\ auto* _pp_var_mBeta __attribute__((unused)) = params_.state_vars[7];\ auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\ auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __device__ void rates(arb_mechanism_ppack params_, int tid_, arb_value_type v) { PPACK_IFACE_BLOCK; arb_value_type qt; qt = pow( 2.2999999999999998, (_pp_var_celsius[tid_]- 21.0)* 0.10000000000000001); _pp_var_mAlpha[tid_] = 0.0033*exp( 0.10000000000000001*(v- -35.0)); _pp_var_mBeta[tid_] = 0.0033*exp( -0.10000000000000001*(v- -35.0)); _pp_var_mInf[tid_] = _pp_var_mAlpha[tid_]/(_pp_var_mAlpha[tid_]+_pp_var_mBeta[tid_]); _pp_var_mTau[tid_] = 1.0/(_pp_var_mAlpha[tid_]+_pp_var_mBeta[tid_])/qt; } __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type v = _pp_var_vec_v[node_indexi_]; rates(params_, tid_, v); _pp_var_m[tid_] = _pp_var_mInf[tid_]; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = 
blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type v = _pp_var_vec_v[node_indexi_]; arb_value_type b_0_, a_0_, ll0_, ll1_; ll1_ = 0.; ll0_ = 0.; rates(params_, tid_, v); a_0_ = _pp_var_mTau[tid_]; b_0_ = _pp_var_mInf[tid_]; ll0_ = -dt/a_0_; ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_); _pp_var_m[tid_] = b_0_+(_pp_var_m[tid_]-b_0_)*ll1_; } } __global__ void compute_currents(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_k_indexi_ = _pp_var_ion_k_index[tid_]; auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type conductivity_ = 0; arb_value_type current_ = 0; arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_]; arb_value_type v = _pp_var_vec_v[node_indexi_]; arb_value_type ik = 0; _pp_var_g[tid_] = _pp_var_gbar[tid_]*_pp_var_m[tid_]; ik = _pp_var_g[tid_]*(v-ek); current_ = ik; conductivity_ = _pp_var_g[tid_]; _pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]); _pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]); _pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]); } } } // namespace void mechanism_Im_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p); if (!p->multiplicity) return; hipLaunchKernelGGL(( multiply), dim3(dim3{grid_dim), dim3(1}), block_dim, 0, *p); } void mechanism_Im_gpu_compute_currents_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_Im_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_Im_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_Im_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_Im_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace allen_catalogue } // namespace arb
ab338b7abe16e2b36e91d6dea902da0b1191dc21.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace arb { namespace allen_catalogue { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_v __attribute__((unused)) = params_.state_vars[1];\ auto* _pp_var_g __attribute__((unused)) = params_.state_vars[2];\ auto* _pp_var_celsius __attribute__((unused)) = params_.state_vars[3];\ auto* _pp_var_mInf __attribute__((unused)) = params_.state_vars[4];\ auto* _pp_var_mTau __attribute__((unused)) = params_.state_vars[5];\ auto* _pp_var_mAlpha __attribute__((unused)) = params_.state_vars[6];\ auto* _pp_var_mBeta __attribute__((unused)) = params_.state_vars[7];\ auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\ auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __device__ void rates(arb_mechanism_ppack params_, int tid_, arb_value_type v) { PPACK_IFACE_BLOCK; arb_value_type qt; qt = pow( 2.2999999999999998, (_pp_var_celsius[tid_]- 21.0)* 0.10000000000000001); _pp_var_mAlpha[tid_] = 0.0033*exp( 0.10000000000000001*(v- -35.0)); _pp_var_mBeta[tid_] = 0.0033*exp( -0.10000000000000001*(v- -35.0)); _pp_var_mInf[tid_] = _pp_var_mAlpha[tid_]/(_pp_var_mAlpha[tid_]+_pp_var_mBeta[tid_]); _pp_var_mTau[tid_] = 1.0/(_pp_var_mAlpha[tid_]+_pp_var_mBeta[tid_])/qt; } __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type v = _pp_var_vec_v[node_indexi_]; rates(params_, tid_, v); _pp_var_m[tid_] = _pp_var_mInf[tid_]; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= 
_pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type v = _pp_var_vec_v[node_indexi_]; arb_value_type b_0_, a_0_, ll0_, ll1_; ll1_ = 0.; ll0_ = 0.; rates(params_, tid_, v); a_0_ = _pp_var_mTau[tid_]; b_0_ = _pp_var_mInf[tid_]; ll0_ = -dt/a_0_; ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_); _pp_var_m[tid_] = b_0_+(_pp_var_m[tid_]-b_0_)*ll1_; } } __global__ void compute_currents(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto ion_k_indexi_ = _pp_var_ion_k_index[tid_]; auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type conductivity_ = 0; arb_value_type current_ = 0; arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_]; arb_value_type v = _pp_var_vec_v[node_indexi_]; arb_value_type ik = 0; _pp_var_g[tid_] = _pp_var_gbar[tid_]*_pp_var_m[tid_]; ik = _pp_var_g[tid_]*(v-ek); current_ = ik; conductivity_ = _pp_var_g[tid_]; _pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]); _pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]); _pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]); } } } // namespace void mechanism_Im_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); init<<<grid_dim, block_dim>>>(*p); if (!p->multiplicity) return; multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p); } void mechanism_Im_gpu_compute_currents_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); compute_currents<<<grid_dim, block_dim>>>(*p); } void mechanism_Im_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); advance_state<<<grid_dim, block_dim>>>(*p); } void mechanism_Im_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_Im_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_Im_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace allen_catalogue } // namespace arb
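In the generated advance_state kernel above, ll0 = -dt/mTau and ll1 = (1 + 0.5*ll0)/(1 - 0.5*ll0), so the update m = mInf + (m - mInf)*ll1 is the trapezoidal (Crank-Nicolson type) step for the gating equation. In standard notation, as a restatement of the code rather than new behaviour:

    \frac{dm}{dt} = \frac{m_\infty - m}{\tau_m},
    \qquad
    m_{n+1} = m_\infty + (m_n - m_\infty)\,
              \frac{1 - \Delta t/(2\tau_m)}{1 + \Delta t/(2\tau_m)} .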